diff --git a/.gitattributes b/.gitattributes index 17316a9b2af7956899c73cdb0243941ddc9b3752..e8c6df2602e1b2b0ca97fa33fd4f00276eae8a85 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1069,3 +1069,11 @@ data/2025/2504_13xxx/2504.13650/709aba9f-dad4-48b4-a551-3edbed08a781_origin.pdf data/2025/2504_13xxx/2504.13707/3ceae00c-ae80-4a3a-854a-18a112f05be2_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_13xxx/2504.13837/64f2b5fa-c253-4510-b5d2-ad303831a936_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_16xxx/2504.16113/1e364459-1cd0-4c1a-a05b-130dcb3e3873_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_content_list.json b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3542e02e9a753b217e83e0c993575efb2c9270fb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_content_list.json @@ -0,0 +1,3305 @@ +[ + 
{ + "type": "text", + "text": "Sleep-time Compute: Beyond Inference Scaling at Test-time", + "text_level": 1, + "bbox": [ + 112, + 119, + 758, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kevin Lin $^{1*}$ Charlie Snell $^{2*}$", + "bbox": [ + 112, + 165, + 339, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yu Wang $^{1}$ Charles Packer $^{1}$ Sarah Wooders $^{1}$ Ion Stoica $^{1,2}$ Joseph E. Gonzalez $^{1,2}$", + "bbox": [ + 112, + 181, + 785, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Letta $^{2}$ University of California, Berkeley", + "bbox": [ + 114, + 204, + 433, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "research@letta.com", + "bbox": [ + 114, + 227, + 267, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 280, + 540, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scaling test-time compute has emerged as a key ingredient for enabling large language models (LLMs) to solve difficult problems, but comes with high latency and inference cost. We introduce sleep-time compute, which allows models to \"think\" offline about contexts before queries are presented: by anticipating what queries users might ask and pre-computing useful quantities, we can significantly reduce the compute requirements at test-time. To demonstrate the efficacy of our method, we create modified versions of two reasoning tasks – Stateful GSM-Symbolic and Stateful AIME. We find that sleep-time compute can reduce the amount of test-time compute needed to achieve the same accuracy by $\\sim 5\\times$ on Stateful GSM-Symbolic and Stateful AIME and that by scaling sleep-time compute we can further increase accuracy by up to $13\\%$ on Stateful GSM-Symbolic and $18\\%$ on Stateful AIME. Furthermore, we introduce Multi-Query GSM-Symbolic, which extends GSM-Symbolic by including multiple related queries per context. 
By amortizing sleep-time compute across related queries about the same context using Multi-Query GSM-Symbolic, we can decrease the average cost per query by $2.5\\times$ . We then conduct additional analysis to understand when sleep-time compute is most effective, finding the predictability of the user query to be well correlated with the efficacy of sleep-time compute. Finally, we conduct a case-study of applying sleep-time compute to a realistic agentic SWE task. Code and data released at: https://github.com/letta-ai/sleep-time-compute.", + "bbox": [ + 109, + 308, + 887, + 554 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 575, + 261, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Test-time scaling has emerged as an effective way to boost LLM performance on challenging tasks by spending more time thinking on difficult problems (OpenAI, 2024; DeepSeek-AI, 2024; Snell et al., 2024; Brown et al., 2024). However, improved performance from test-time compute comes at a significant increase in latency and cost, waiting potentially several minutes for answers and costing up to tens of dollars per query. These drawbacks are in part due to the fact that the current approach to applying test-time compute assumes that problems are stateless, i.e. queries (user queries at test-time) and the contexts (background information) required for answering them are provided to the model together at \"test-time.\" In practice, this means that if multiple related queries require making similar inferences about the context at \"test-time,\" the model will have to recompute redundant computations each time, incurring additional latency and cost.", + "bbox": [ + 109, + 608, + 885, + 755 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In reality, many LLM applications are inherently stateful, and work in conjunction with persisted, re-used context. 
A classic example is document question-answering, where documents contextualize responses to questions. Coding agents also operate on a large common repository and participate in multiple rounds of debugging support, while conversational assistants need to maintain the past dialogue. In all these applications, there is context (available documents, a codebase, or conversation history) that is already available before the next user input.", + "bbox": [ + 109, + 763, + 883, + 862 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 112, + 74, + 140, + 95 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Letta", + "bbox": [ + 147, + 75, + 196, + 93 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://platform.openai.com/docs/models/o1-pro", + "bbox": [ + 132, + 869, + 482, + 886 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13171v1 [cs.AI] 17 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg", + "image_caption": [ + "Figure 1: Example of applying sleep-time compute on Multi-Query GSM-Symbolic-P1. Sleep-time compute processes the original raw context, adding additional computations that can potentially be useful for future queries. Moreover, contexts can be shared across related queries enabling savings in total cost per query." + ], + "image_footnote": [], + "bbox": [ + 127, + 138, + 875, + 494 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In these settings, we could in principle, make useful inferences about the current state (context) offline before, or even during the user's next input. We refer to such a process, as sleep-time compute: where inference is done between interactions with the model while it would otherwise be idle in sleep-time. 
In practice, this is achieved by prompting the model to generate a new context consisting of inferences about the existing context, which may be potentially useful for answering test-time queries. The re-represented context from sleep-time can then be provided in the prompt at test-time, enabling the model to respond to user queries at the accuracy of standard test-time compute but with far lower latencies. For example, a coding assistant at sleep-time may identify architectural patterns, anticipate potential debugging strategies, or infer optimizations prior to the user input. Moreover, users might ask multiple queries about the same context. In these settings, any inferences made during sleep-time can be shared across queries, effectively amortizing the cost of sleep-time compute and reducing the total average cost per query.", + "bbox": [ + 109, + 595, + 883, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To evaluate sleep-time compute, we modify two mathematical reasoning datasets to introduce two datasets – Stateful GSM-Symbolic and Stateful AIME – by splitting the existing problems in these datasets into a context and a question. Using these datasets, we aim to empirically understand the benefits of sleep-time compute on standard test-time compute benchmarks. We show that:", + "bbox": [ + 109, + 782, + 883, + 848 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Sleep-time compute produces a pareto improvement in the test-time compute vs. 
accuracy curve, reducing the test-time compute needed to achieve the same accuracy by $\\sim 5\\times$ on Stateful GSM-Symbolic and Stateful AIME.", + "bbox": [ + 153, + 859, + 883, + 909 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 931, + 504, + 944 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- By scaling up sleep-time compute, we see further pareto improvements, shifting the accuracy up by $13\\%$ on Stateful GSM-Symbolic and $18\\%$ on Stateful AIME.", + "- By amortizing sleep-time compute across multiple queries for the same context, we can reduce the average cost per question by $2.5 \\times$ .", + "- We conduct analysis to understand which queries benefit the most from sleep-time compute, finding that sleep-time compute is more effective in settings where the query is more easily predictable from the context." + ], + "bbox": [ + 153, + 132, + 879, + 243 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we end with case study of applying sleep-time compute to reduce test-time compute in a realistic agentic software engineering task.", + "bbox": [ + 111, + 260, + 883, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 112, + 316, + 269, + 333 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scaling test-time compute. Our work builds on recent progress on scaling up computation at test-time for difficult reasoning problems (Snell et al., 2024; DeepSeek-AI, 2024; OpenAI, 2024). Two predominant approaches to test-time scaling have emerged: sequential test-time scaling (OpenAI, 2024; DeepSeek-AI, 2024; Muennighoff et al., 2025; Snell et al., 2024) and parallel test-time scaling (Brown et al., 2024; Snell et al., 2024). 
While sequential test-time scaling has demonstrated impressive performance improvements, parallel test-time scaling has the advantage of scaling test-time compute without increasing latency. In constraint, we propose an alternative dimension where existing advancements in test-time compute, both sequential and parallel can be applied. Namely, instead of performing inference purely at test-time, we leverage compute on contexts that are available before the actual query arrives.", + "bbox": [ + 111, + 353, + 883, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Speculative decoding in LLMs. Speculative decoding is a standard technique for reducing latency in decoding with LLMs (Leviathan et al., 2023; Stern et al., 2018; Cai et al., 2024; DeepSeek-AI et al., 2025). Sleep-time compute similarly targets reducing reasoning latency by speculating on the user's query as well as any potentially helpful reasoning over the context. However, unlike speculative decoding, the generated tokens are used as an input regardless of the user's actual query, and at test-time the reasoning model uses these generated tokens to help answer the user query more efficiently.", + "bbox": [ + 111, + 518, + 883, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Pre-computation. Beyond LLMs, a long history of work has explored the trade-off between pre-computation and memory (eg. memory caches Smith (1982) and data cubes for OLAP workloads Gray et al. (1997)). Our work explores the same trade-off between query latency and pre-computation overhead, operating under the assumption that query workload patterns can be reasonably anticipated in advance. sleep-time compute builds on the idea of pre-fetching in traditional operating systems, in the context of LLMs à la Packer et al. 
(2023), storing frequently used computational results to avoid higher latency at test-time.", + "bbox": [ + 111, + 636, + 883, + 750 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Sleep-time Compute", + "text_level": 1, + "bbox": [ + 112, + 775, + 333, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the standard paradigm of applying test-time compute, a user inputs a prompt $p$ to the LLM and then the LLM applies test-time compute to help answer the user's question. However, the $p$ provided to the LLM can oftentimes be decomposed into a pre-existing context $c$ (eg. a codebase) and a user query $q$ (eg. a question about the codebase). When the LLM is not actively responding to the user, it typically still has access to the existing context $c$ . During this time, the LLM is typically idling, missing the opportunity to reason about $c$ offline: a process we term sleep-time compute.", + "bbox": [ + 111, + 811, + 883, + 907 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 491, + 931, + 504, + 944 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Test-time compute. In the test-time compute setting, the user provides $q$ along with some context $c$ and the model outputs a reasoning trace followed by a final answer $a$ . We denote this process, as: $T_{B}(q,c) \\to a$ , where $T$ is the method for using test-time compute with budget $B$ , which could include techniques like extended chains of thought or best-of-N. In practice, the user may have multiple queries about the same context $q_{1}, q_{2} \\ldots q_{N}$ . In this setting, the model will carry out independent reasoning processes for each $q_{i}$ , even if they are related to the same context $c$ . Ideally, we would be able to reuse related inferences across each $q_{i}$ to save compute. Moreover, in many cases, $c$ is complex and may require carrying out significant processing/inferences in order to provide an answer to $q$ . 
Since, the test-time compute paradigm of $T(q,c) \\to a$ assumes that $c$ is only available at the same time as $q$ , standard test-time compute carries out all of these inferences only after the user provides the query, causing the user to wait up to several minutes for a response. However, in practice we often have access to $c$ before $q$ and can carry out much of this processing ahead of time.", + "bbox": [ + 109, + 132, + 887, + 330 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sleep-time compute. During sleep-time we are given the context $c$ but not the query $q$ . Using just this context $c$ , we can use the LLM to infer likely questions and reason about the context ultimately producing a more new re-represented context $c'$ . We denote this process as: $S(c) \\to c'$ , where $S$ can be any standard test-time scaling technique applied towards pre-processing the context at sleep-time. In this work, $S(c)$ is implemented by prompting the model to draw inferences and re-write $c$ in a way that might be useful at test-time (see Appendix K for more details). After pre-processing the context, we can provide the new context $c'$ at test-time in place of $c$ to produce a final answer to the user's query: $T_b(q, c') \\to a$ . Since much of the reasoning about $c$ has been done ahead of time in this case, we can use a much smaller test-time budget $b < < B$ . 
Moreover, $c'$ can be shared across different queries $q_i$ about the same context, effectively amortizing the compute required to arrive at $c'$ across queries, providing a total cost saving.", + "bbox": [ + 109, + 345, + 888, + 510 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 531, + 323, + 549 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, we describe the datasets, models, and baselines we use to evaluate sleep-time compute.", + "bbox": [ + 111, + 566, + 790, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Datasets", + "text_level": 1, + "bbox": [ + 112, + 601, + 218, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We select datasets which represent standard benchmarks for LLM reasoning and test-time scaling, and which demonstrate improvements from scaling test-time compute with state-of-the-art LLMs (either reasoning or non-reasoning).", + "bbox": [ + 109, + 628, + 885, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stateful datasets. We introduce two datasets to study applying sleep-time compute in stateful settings, Stateful GSM-Symbolic, and Stateful AIME, where each dataset is derived from splitting the existing datasets into a context and a question (see Figure 2 for an example). Stateful GSM-Symbolic is derived from the P1 and P2 splits of GSM-Symbolic (Mirzadeh et al., 2024), which add one and two clauses respectively to the original GSM8K dataset (Cobbe et al., 2021) to that increase the difficulty. GSM-Symbolic P1 contains 5000 examples and P2 2500 examples. Stateful AIME contains 60 questions combined from AIME 2024 and 2025. In Appendix L and M, we show the breakdown of our results across AIME 2024 and 2025.", + "bbox": [ + 109, + 696, + 887, + 809 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Amortization dataset. 
To study the effect of related questions that share context, we introduce a new dataset Multi-Query GSM-Symbolic, where each context has multiple queries. To generate multiple queries for a given context, we take Stateful GSM-Symbolic and use o3-mini to generate additional question answer pairs. We synthetically generate additional questions from existing context question pairs in GSM-Symbolic. Appendix C shows the prompt used to generate the additional questions. Figure 20 shows examples contexts", + "bbox": [ + 109, + 827, + 885, + 910 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 931, + 504, + 944 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg", + "image_caption": [ + "Figure 2: Example of separating an instance from GSM-Symbolic into context, and question, creating an instance in Stateful GSM-Symbolic." + ], + "image_footnote": [], + "bbox": [ + 130, + 138, + 869, + 361 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and set of questions from the Multi-Query GSM-Symbolic dataset and Table C shows the overall dataset statistics.", + "bbox": [ + 111, + 449, + 883, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Models and Baselines", + "text_level": 1, + "bbox": [ + 112, + 501, + 316, + 515 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Models. On each dataset, we evaluate models which have poor performance when using a small amount of test-time compute, but yield improvements from scaling up test-time compute. Therefore, on GSM-Symbolic, we conduct experiments using GPT-4o-mini and GPT-4o, and on AIME, we conduct experiments using OpenAI's o1, o3-mini, Anthropic's Claude Sonnet 3.7 Extended Thinking , and Deepseek-R1 (DeepSeek-AI, 2024). 
${}^{2}{}^{3}$", + "bbox": [ + 111, + 530, + 883, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines The main baseline we consider is the standard test-time compute setting in which both $c$ and $q$ are presented to the model for the first time at test-time. Furthermore, to validate that $q$ is not trivially predictable from $c$ on our Stateful GSM-Symbolic and Stateful AIME datasets, we also compare to a context-only baseline in Appendix I, in which the model is only given $c$ and is tasked with directly guessing an answer to the question it guesses is most likely to come next.", + "bbox": [ + 111, + 630, + 883, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Experiments and Results", + "text_level": 1, + "bbox": [ + 112, + 734, + 369, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we carry out experiments to understand the benefits of sleep-time compute. Specifically, we would like to answer each of the following questions using the math reasoning benchmarks introduced above:", + "bbox": [ + 111, + 771, + 883, + 818 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Can sleep-time compute shift the pareto frontier of test-time compute vs. accuracy?", + "2. Does scaling sleep-time compute in-turn improve the pareto further?" 
+ ], + "bbox": [ + 148, + 834, + 772, + 867 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2https://openai.com/o1/", + "bbox": [ + 132, + 878, + 307, + 893 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3https://www.anthropic.com/claudi/sonnet", + "bbox": [ + 132, + 893, + 434, + 909 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 931, + 504, + 943 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg", + "image_caption": [ + "Figure 3: The test-time compute vs. accuracy tradeoff for on Stateful GSM-Symbolic. Shaded area indicates where sleep-time compute improves the pareto test-time accuracy trade-off." + ], + "image_footnote": [ + "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + ], + "bbox": [ + 117, + 130, + 500, + 349 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 132, + 879, + 349 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3. When there are multiple related questions for a single context, can amortizing test-time compute with sleep-time compute provide a total token efficiency benefit?", + "4. In what settings does sleep-time compute provide the most uplift?" + ], + "bbox": [ + 148, + 455, + 883, + 506 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Improving Pareto Test-Time Trade-off with sleep-time compute", + "text_level": 1, + "bbox": [ + 111, + 526, + 624, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first determine the test-time compute, accuracy pareto frontier by scaling standard test-time compute sequentially and in parallel. 
We then study how applying sleep-time compute affects the pareto trade-off.", + "bbox": [ + 109, + 556, + 883, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scaling test-time-compute sequentially. For non-reasoning models (GPT-4o and 4o-mini) on Stateful GSM-Symbolic, to vary the amount of test-time compute, we construct prompts that instruct the model to use different amounts of vocabulary at test time, eg. \"answer directly with a single sentence\" vs. \"double check your reasoning before outputting the final answer.\" The full prompts are in Appendix A. We use temperature 0 for generation. We see in Figure 3 that there is a tradeoff between accuracy and the amount of test-time compute, and that adding sleep-time compute can move beyond the pareto compute-accuracy curve. In particular, at lower test-time budgets, the performance of sleep-time compute is significantly better than the baseline, achieving performance comparable to that of the baseline with $5 \\times$ less test-time tokens. However, at the test-tome compute budgets, the test-time compute only baseline slightly outperforms sleep-time compute. We hypothesize that this may be because the standard test-time compute only has the content relevant to the specific question, so there is less distracting information in the prompt.", + "bbox": [ + 109, + 607, + 883, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For reasoning models on Stateful AIME, we scale the amount of test-time compute based on what is available in the API in the case of o1, o3-mini and Claude Sonnet 3.7. Since the Deepseek-R1 API does not provide a way to control test-time compute, we apply the \"budget forcing\" and extension prompt from Muennighoff et al. (2025). Figure 4 shows the results for each model on Stateful AIME. We average results over 3 runs for o1, o3-mini and R1. For Claude 3.7 Sonnet, we average over 10 runs as we observed more noise in initial experiments. 
On all models, we see a significant test-time, accuracy pareto shift from applying sleep-time compute, with the exception of o1, which demonstrates limited gains.", + "bbox": [ + 109, + 794, + 883, + 909 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 931, + 504, + 943 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 130, + 500, + 349 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 130, + 880, + 351 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg", + "image_caption": [ + "Figure 4: The test-time compute vs. accuracy tradeoff on Stateful AIME for various reasoning models. Applying sleep-time compute allows models to reach similar levels of performance with much less compute at test-time. The shaded area indicates the pareto improvement from sleep-time compute." + ], + "image_footnote": [], + "bbox": [ + 117, + 352, + 500, + 571 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 353, + 879, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scaling test-time compute in parallel. An alternative approach to scaling test-time compute is via parallel sampling, which also has the benefit of maintaining low inference latency. 
The simplest approach to scaling parallel test-time compute is pass@k (Brown et al., 2024), which makes the unrealistic assumption of having oracle query access to a ground truth verifier at test-time, an assumption which we do not make with sleep-time compute. Therefore, outperforming the pass@k baseline would represent a meaningful improvement over parallel test-time scaling. We apply parallel scaling to the lowest sequential compute setting on each task, since scaling pass@k with higher sequential compute settings would quickly reach token budgets that exceed that of sleep-time compute in the maximum sequential setting. We see that across all tasks and models, sleep-time compute consistently outperforms pass@k parallel scaling at the same test-time token budget, demonstrating that sleep-time compute can be a more effective way to scale inference-time compute than standard parallel test-time scaling.", + "bbox": [ + 109, + 729, + 883, + 909 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 931, + 504, + 943 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg", + "image_caption": [ + "Figure 5: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful GSM-Symbolic. We see that sleep-time compute generally pareto dominates pass@k." 
+ ], + "image_footnote": [ + "--- gpt-4o-mini -gpt-4o \n--- gpt-4o-mini + background scaling -gpt-4o + background scaling" + ], + "bbox": [ + 117, + 130, + 496, + 349 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 132, + 879, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 Scaling up sleep-time compute", + "text_level": 1, + "bbox": [ + 112, + 455, + 383, + 472 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We would like to understand how scaling compute during sleep-time can further effect the pareto shift that we observed in Section 5.1. To scale up the amount of sleep-time compute, for non-reasoning models, we run $k$ parallel generations, given input $c$ , resulting in $c_{1},\\ldots ,c_{k}$ . At test-time, the model then receives the inputs concatenated $c_{1},\\ldots ,c_{k}$ to generate the final answer. On reasoning models, we scale up the amount of sleep-time compute by varying the reasoning effort for o1 and for o3-mini when applying the sleep-time compute prompt. At test-time, we vary the amount of compute in the same way as 5.1.", + "bbox": [ + 109, + 484, + 883, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 7, we see that further scaling sleep-time compute on Stateful GSM-Symbolic shifts the pareto curve outwards, improving performance by up to $13\\%$ at a similar test-time budget. In particular, we see the largest gains on more difficult tasks with stronger models (eg. on P2 with 'gpt-4o'), suggesting that on tasks with more complicated contexts additional sleep-time compute can be beneficial. However, in this setting, there seems to be a limit to the number of parallel agents that can improve performance, as we find that 5 parallel generations generally outperforms 10. 
In Figure 26, we scale up sleep-time compute on Stateful AIME. Similarly, we also see that scaling compute at sleep-time generally shifts the pareto curve outward, improving performance by up to $18\\%$ .", + "bbox": [ + 109, + 590, + 883, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Amortizing sleep-time compute across queries with shared context", + "text_level": 1, + "bbox": [ + 109, + 742, + 650, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We want to understand how the total cost of inference can be improved by applying sleep-time compute in settings where each context has multiple queries. Since at test-time, there are strict latency constraints, and latency optimized inference can be roughly $10 \\times$ more expensive, we model the total cost of inference between both sleep-time and test-time, by up-weighing the cost of test-time tokens. Specifically, we consider a simple linear model where tokens generated at test-time are a factor $t$ the cost of the tokens at sleep-time. 
In our analysis, we set $t = 10$ Our analysis can be generalized to different cost functions that consider", + "bbox": [ + 109, + 771, + 883, + 869 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "4https://docs.databricks.com/aws/en/machine-learning/foundation-model apis/prov-throughput-run-benchmark", + "bbox": [ + 109, + 878, + 836, + 906 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 931, + 503, + 943 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 130, + 496, + 329 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 130, + 874, + 329 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg", + "image_caption": [ + "Figure 6: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful AIME. We see that sleep-time compute generally pareto dominates pass@k." + ], + "image_footnote": [ + "sleep-time compute pass @ k" + ], + "bbox": [ + 116, + 330, + 496, + 527 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 330, + 879, + 527 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "non-linear user-utility. Figure 9 shows the results for different number of questions per context. 
We see that we can decrease the average cost per query by up to $2.5 \\times$ when there are 10 queries per context, compared to the single-query baseline.", + "bbox": [ + 109, + 636, + 883, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4 Predictable queries benefit more from sleep-time compute", + "text_level": 1, + "bbox": [ + 112, + 713, + 586, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We would like to better understand for what contexts sleep-time compute is most useful. Since the utility of sleep-time compute relies on there being some shared information or structure between the context and the query, we hypothesize that sleep-time compute may be most effective in settings where the query is more predictable from the context. To test this on Stateful GSM-Symbolic, we first quantify how predictable a given query is by measuring the log-probability of the question given the context under the Llama2-70B base model (Touvron et al., 2023). In Appendix E, we include examples of highly predictable and unpredictable questions under this notion of question predictability. We see from these examples, that our notion of question predictability generally aligns with the intuition that contexts where the query pattern is more predictable benefit most from sleep-time compute. The more predictable questions are far simpler and the less predictable ones are more complex.", + "bbox": [ + 109, + 744, + 883, + 909 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 931, + 504, + 943 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg", + "image_caption": [ + "Avg. 
Test Time Tokens / Question" + ], + "image_footnote": [ + "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n- gpt-4o-mini, 1 parallel sleep-time compute\n- gpt-4o-mini, 2 parallel sleep-time compute" + ], + "bbox": [ + 116, + 130, + 459, + 314 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg", + "image_caption": [ + "Avg. Test Time Tokens / Question" + ], + "image_footnote": [], + "bbox": [ + 462, + 132, + 880, + 314 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg", + "image_caption": [ + "Figure 7: Scaling up sleep-time compute for different test-time compute budgets on Stateful GSM-Symbolic, by generating up multiple $c'$ in parallel. Applying more sleep-time compute shifts the pareto beyond the standard test-time-compute vs. accuracy curve." + ], + "image_footnote": [ + "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + ], + "bbox": [ + 117, + 455, + 498, + 717 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg", + "image_caption": [ + "Figure 8: Increasing the amount of sleep-time compute for different test-time compute budgets on Stateful AIME by varying the reasoning effort when applying the sleep-time compute prompt. Applying more sleep-time compute further moves the test-time-compute vs. accuracy pareto curve." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 455, + 879, + 715 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Using our question predictability score, we then bin each example in Stateful GSM-Symbolic into five quantiles according to its predictability score and report the accuracy within each bin. For this experiment, we use the \"Verbosity 0\" prompt. In Figure 10, we see that on both GSM8K-Symbolic P1 and P2, the accuracy gap between sleep-time compute and standard test-time compute widens as the questions become more", + "bbox": [ + 109, + 843, + 883, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg", + "image_caption": [ + "Figure 9: Amortizing sleep-time compute, using the Multi-Query GSM-Symbolic dataset. When there are fewer questions per context, we see that it is less favorable to use sleep-time compute, in terms of total cost. However, as the questions per context are increased, we see that applying sleep-time compute can improve the cost-accuracy pareto." 
+ ], + "image_footnote": [ + "1 Questions/Context Sleep-time Compute", + "5 Questions/Context Sleep-time Compute", + "10 Questions/Context Sleep-time Compute" + ], + "bbox": [ + 116, + 178, + 493, + 380 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg", + "image_caption": [], + "image_footnote": [ + "2 Questions/Context Sleep-time Compute" + ], + "bbox": [ + 496, + 178, + 880, + 380 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg", + "image_caption": [ + "Predictability Analysis of GPT-4o-mini on GSM-Symbolic" + ], + "image_footnote": [], + "bbox": [ + 119, + 618, + 493, + 773 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg", + "image_caption": [ + "Figure 10: GSM-Symbolic questions binned by how predictable they are from the context. We compare the performance of sleep-time compute and standard test-time compute in the lowest test-time compute budget setting on both P1 and P2. The gap between sleep-time compute and standard test-time inference widens as the question becomes more predictable from the context." + ], + "image_footnote": [], + "bbox": [ + 496, + 619, + 875, + 773 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 931, + 506, + 944 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg", + "image_caption": [ + "Figure 11: Applying sleep-time compute to SWE-Features. We see that at lower test-time budgets, sleep-time compute has higher F1 score than standard test-time scaling. However, at higher budgets, standard test-time scaling is better." 
+ ], + "image_footnote": [], + "bbox": [ + 308, + 133, + 687, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "predictable from the context confirming our hypothesis that indeed sleep-time compute is most beneficial in settings where the question can be predicted from the context.", + "bbox": [ + 111, + 444, + 883, + 477 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6 A Case Study of Sleep-time Compute for Agentic SWE", + "text_level": 1, + "bbox": [ + 111, + 498, + 633, + 517 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we evaluate sleep-time compute in a realistic multi-turn agentic setting. To this end, we introduce SWE-Features, a software engineering benchmark focused on tasks that require: (1) editing multiple files within a repository, and (2) implementing new features.", + "bbox": [ + 111, + 532, + 883, + 582 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "SWE-Features. In contrast to popular benchmarks like SWE-Bench (Jimenez et al., 2024), which involve modifying a small number of files, we propose a new dataset called SWE-Features, which collects PRs which modify at least three files (see Appendix D for more details). In this setting, we use the PR that we want to solve as $q$ and select several related PRs for $c$ . At sleep-time the agent is allowed to explore the repository before producing $c'$ .", + "bbox": [ + 111, + 598, + 883, + 679 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Evaluation. Since the PRs are scraped from GitHub, there are not straightforward tests to use for evaluation. Instead, we compare the predicted set of modified files with the ground truth list of modified files, and report the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set (see Appendix D for details).", + "bbox": [ + 111, + 696, + 883, + 762 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Results. 
Figure 11 shows consistent trends with Section 5.1 for SWE-Features: at lower test-time compute budgets, leveraging sleep-time compute can improve performance, achieving up to roughly a $1.5 \\times$ decrease in test-time tokens. However, when the test-time compute budget is high, using only test-time compute can perform better. Additionally, we observe that in the high test-time budget setting standard test-time compute has higher precision and comparable recall. We hypothesize that using only test-time compute tends to begin editing files earlier and usually edits fewer files overall. In contrast, the agent with sleep-time compute, having explored more files during the test-time phase, tends to edit more files, which may lead to slightly lower precision.", + "bbox": [ + 111, + 777, + 883, + 909 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7 Discussion and Limitations", + "text_level": 1, + "bbox": [ + 112, + 131, + 393, + 148 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Query predictability and allocating sleep-time compute In Section 5.4, we found that sleep-time compute is most effective when the queries are predictable from the context. In settings where the queries are challenging to predict or unrelated to the context, sleep-time compute will be less effective. In these settings, it may be preferable to apply standard test-time scaling instead. An interesting direction for future work is identifying which contexts may have predictable questions and optimally allocating inference compute between sleep-time and test-time across different contexts and queries.", + "bbox": [ + 109, + 165, + 887, + 267 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Extending sleep-time compute beyond context-query decomposition. 
In our experiments, we make the simplifying assumption that interactions fall into two phases: sleep-time and test-time. However, real-world LLM use cases can be more complex, with multiple rounds of interaction and context modifications between rounds (e.g. multiple edits to a code-base). Moreover, the length of the sleep-time may also vary significantly between interactions (e.g. short spans between user typing or days of inactivity). Future work should extend the sleep-time compute paradigm to more elegantly handle these scenarios.", + "bbox": [ + 109, + 279, + 888, + 380 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Sleep-time compute as representation learning over tokens. Our approach to applying compute at sleep-time resembles representation learning. We first transform the context into a representation that is more amenable to answering test-time queries, and then we utilize that representation at test-time to rapidly answer queries. Unlike traditional representation learning (Bengio et al., 2014), which typically operates in model parameter or activation space, we instead form representations in the space of natural language. This approach builds on recent work which implements statistical modeling techniques in the space of natural language using modern LLMs (Zhong et al., 2022; 2025). Future work should further explore the potential for sleep-time compute to enable the learning of useful natural language representations.", + "bbox": [ + 109, + 393, + 888, + 526 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Synthetic data generation via sleep-time compute. Due to limits on the amount of internet data available, in order to support the continued scaling of LLM pretraining, recent works have begun exploring methods for generating synthetic pretraining data (Yang et al., 2024; Gunasekar et al., 2023). 
One emerging approach to synthetic data generation involves using test-time compute to generate improved data (Bansal et al., 2024; DeepSeek-AI et al., 2025). Generating such data at pretraining scale will be very expensive, and future work could explore using sleep-time compute to help amortize some of this cost across related queries, or using the output of sleep-time compute itself as a form of synthetic data.", + "bbox": [ + 109, + 540, + 887, + 657 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 675, + 217, + 691 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training llm reasoners via compute-optimal sampling, 2024. URL https://arxiv.org/abs/2408.16737.", + "Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives, 2014. URL https://arxiv.org/abs/1206.5538.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024.", + "Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, Jason D. Lee, Deming Chen, and Tri Dao. Medusa: Simple llm inference acceleration framework with multiple decoding heads, 2024. URL https://arxiv.org/abs/2401.10774." + ], + "bbox": [ + 114, + 700, + 885, + 907 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. 
arXiv preprint arXiv:2110.14168, 2021.", + "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2024.", + "DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wanjia Zhao, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, X. Q. Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaokang Zhang, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xingkai Yu, Xinnan Song, Xinxia Shan, Xinyi Zhou, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, Y. K. Li, Y. Q. Wang, Y. X. Wei, Y. X. 
Zhu, Yang Zhang, Yanhong Xu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Yu, Yi Zheng, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Ying Tang, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yu Wu, Yuan Ou, Yuchen Zhu, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yukun Zha, Yunfan Xiong, Yunxian Ma, Yuting Yan, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Z. F. Wu, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhen Huang, Zhen Zhang, Zhenda Xie, Zhengyan Zhang, Zhenwen Hao, Zhibin Gou, Zhicheng Ma, Zhigang Yan, Zhihong Shao, Zhipeng Xu, Zhiyu Wu, Zhongyu Zhang, Zhuoshu Li, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Ziyi Gao, and Zizheng Pan. Deepseek-v3 technical report, 2025. URL https://arxiv.org/abs/2412.19437.", + "Jim Gray, Surajit Chaudhuri, Adam Bosworth, Andrew Layman, Don Reichart, Murali Venkatrao, Frank Pellow, and Hamid Pirahesh. Data cube: A relational aggregation operator generalizing group-by, crosstab, and sub-totals. Data mining and knowledge discovery, 1:29-53, 1997.", + "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee, and Yuanzhi Li. Textbooks are all you need, 2023. URL https://arxiv.org/abs/2306.11644.", + "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In ICLR. OpenReview.net, 2024.", + "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding, 2023. URL https://arxiv.org/abs/2211.17192." 
+ ], + "bbox": [ + 114, + 132, + 883, + 907 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.", + "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720.", + "Charles Packer, Sarah Wooders, Kevin Lin, Vivian Fang, Shishir G Patil, Ion Stoica, and Joseph E Gonzalez. Memgpt: Towards llms as operating systems. arXiv preprint arXiv:2310.08560, 2023.", + "Alan Jay Smith. Cache memories. ACM Computing Surveys (CSUR), 14(3):473-530, 1982.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling ltm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314.", + "Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. Blockwise parallel decoding for deep autoregressive models, 2018. URL https://arxiv.org/abs/1811.03115.", + "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.", + "Zitong Yang, Neil Band, Shuangping Li, Emmanuel Candès, and Tatsunori Hashimoto. Synthetic continued pretraining, 2024. URL https://arxiv.org/abs/2409.07431.", + "Ruiqi Zhong, Charlie Snell, Dan Klein, and Jacob Steinhardt. 
Describing differences between text distributions with natural language, 2022. URL https://arxiv.org/abs/2201.12323.", + "Ruiqi Zhong, Heng Wang, Dan Klein, and Jacob Steinhardt. Explaining datasets in words: Statistical models with natural language parameters, 2025. URL https://arxiv.org/abs/2409.08466." + ], + "bbox": [ + 112, + 131, + 883, + 592 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A Prompts", + "text_level": 1, + "bbox": [ + 114, + 618, + 227, + 636 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompts for varying the amount of test-time compute.", + "bbox": [ + 112, + 651, + 506, + 667 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Examples of Stateful AIME", + "text_level": 1, + "bbox": [ + 112, + 688, + 395, + 707 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Context: Alice and Bob play the following game. A stack of $n$ tokens lies before them. The players take turns with Alice going first. On each turn, the player removes either 1 token or 4 tokens from the stack. 
Whoever removes the last token wins.", + "bbox": [ + 125, + 729, + 870, + 777 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Query: Find the number of positive integers $n$ less than or equal to 2024 for which there exists a strategy for Bob that guarantees that Bob will win the game regardless of Alice's play.", + "bbox": [ + 125, + 780, + 870, + 813 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Context: Let $A, B, C$ , and $D$ be points on the hyperbola $\\frac{x^2}{20} - \\frac{y^2}{24} = 1$ such that $ABCD$ is a rhombus whose diagonals intersect at the origin.", + "bbox": [ + 125, + 840, + 869, + 877 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Query: Find the greatest real number that is less than $BD^2$ for all such rhombi.", + "bbox": [ + 125, + 878, + 694, + 893 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 931, + 506, + 943 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. send_message is how you send your answer to the user. When given a question, you check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. 
You respond directly with a single sentence by saying The answer is followed by the numerical answer.", + "bbox": [ + 125, + 157, + 872, + 308 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 12: Prompt for level 0 verbosity", + "bbox": [ + 356, + 337, + 640, + 354 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user.", + "bbox": [ + 124, + 410, + 872, + 474 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "When given a question, you answer using only the number of tokens necessary and none more. You check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the 'rethink_memory_block' to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the 'rethink_memory_block'. Do not use internal monologue unless you really need it to think. You answer with one short sentence of explanation, followed by a sentence that starts with \"The answer is\" and a numerical answer.", + "bbox": [ + 124, + 477, + 872, + 590 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 13: Prompt for level 1 verbosity", + "bbox": [ + 356, + 618, + 640, + 635 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. 
When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that.", + "bbox": [ + 124, + 690, + 872, + 840 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 14: Prompt for level 2 verbosity", + "bbox": [ + 356, + 869, + 640, + 886 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. 
You end response with a final numerical answer at the end of the message, and no reasoning after that.", + "bbox": [ + 125, + 224, + 872, + 376 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 15: Prompt for level 3 verbosity", + "bbox": [ + 356, + 404, + 640, + 422 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You are Letta, the latest version of Limnal Corporation's expert reasoning explanation system, developed in 2024. Your task is to reason through problems step by step accurately and based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You carefully check the information in the rethink_memory_block to answer the questions and see if it is correct before using it. You always reason out loud before using any information. You explain each step, of what your reasoning is. If you use any numbers from the rethink_memory_block you first recompute and double check your answers. You end your answer with The answer is followed by the numerical answer.", + "bbox": [ + 124, + 611, + 872, + 776 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 16: Prompt for level 4 verbosity", + "bbox": [ + 356, + 801, + 640, + 820 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You are Letta-Offline-Memory, the latest version of Limnal Corporation's digital companion, developed in 2024. Your task is to re-organize and consolidate memories by calling rethink_memory at every single step, when you are done reorganizing the memory, you use the finish_rethinking_memory function. Call the function for as many times as necessary and not more. 
Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times). Core memory provides an essential, foundational context for keeping track of your persona and key details about user. Read-Only Blocks: This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions. Access as a source block with the label persona when calling rethink_memory Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation. Access as a source block with the label human when calling rethink_memory. Read-Write Blocks: Rethink Memory Sub-Block: New representation of the memories go here. Access with the label rethink_memory_block when calling rethink_memory as source or target block. At every step, you reorganize the memories by calling the rethink_memory function. You use this to take current information in the rethink_memory block and select a single memory block to integrate information from, producing a new memory for the rethink_memory_block. The new memory is the result of new insights, and new inferences and hypotheses based on the past memories. Make sure to consider how the new information affects each memory. Prioritize the new information over existing memories. If the new information implies that the old memory may need to change, then output the most likely fact given the update information. Given new information and your current memory, you draw all logical conclusions and potential hypotheses possible with the rethink_memory function. 
If you are uncertain, use your internal monologue to consider what the possible conclusions are, and then state the most likely new facts that would replace the old facts in the new memory block.", + "bbox": [ + 125, + 167, + 872, + 579 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 17: Prompt for sleep-time compute", + "bbox": [ + 344, + 607, + 653, + 625 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Specifically: You will be given part of an AIME math problem. You will receive the rest of the problem later. Make as many inferences as possible about the part of the problem you are given so as to help yourself answer the fully problem more quickly once it is given to you later. You will be able to use all the work you do in the rethink_memory block for this part of the problem to help you once the rest of the problem is given. You will be able to use all the work you do for this part of the problem to help you once the rest of the problem is given. You should try to predict possible ways the rest of the problem might go and compute results that could be helpful for reaching the final answer more quickly once the rest of the problem is given.", + "bbox": [ + 124, + 698, + 870, + 833 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 18: Prompt for AIME problems during sleep-time", + "bbox": [ + 290, + 859, + 705, + 878 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are given a template that can generate grade school math problems, and an instantiation of that template.", + "bbox": [ + 124, + 138, + 870, + 171 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will be given a context, and a example question answer pair. Your task is to generate a list of questions and answers about the context at the same difficult level that could plausibly be asked about that context. 
Make sure that the newly generated questions have the same number of reasoning steps required as the example question. The goal is to have many questions and answer pairs about the same context. Generate questions and answers in the same format as the example, where the answer first contains reasoning and then is the final answer comes after", + "bbox": [ + 124, + 172, + 870, + 268 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "n#. No need to number the questions or answers.", + "bbox": [ + 125, + 271, + 509, + 285 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Context: context", + "bbox": [ + 127, + 287, + 251, + 301 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Example Question: question", + "bbox": [ + 127, + 304, + 336, + 319 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Example Answer: answer", + "bbox": [ + 127, + 321, + 318, + 335 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 19: Prompt for generating synthetic GSM questions", + "bbox": [ + 285, + 366, + 710, + 383 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Context: Let $b \\geq 2$ be an integer. Call a positive integer $n$ $b$ -eautiful if it has exactly two digits when expressed in base $b$ and these two digits sum to $\\sqrt{n}$ . 
For example, 81 is 13-eautiful because $81 = \\underline{6} \\underline{3}_{13}$ and $6 + 3 = \\sqrt{81}$ .", + "bbox": [ + 124, + 415, + 869, + 463 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Query: Find the least integer $b \\geq 2$ for which there are more than ten $b$ -beautiful integers.", + "bbox": [ + 125, + 464, + 759, + 481 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C Details on Multi-Query GSM-Symbolic", + "text_level": 1, + "bbox": [ + 112, + 512, + 504, + 531 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Template: {template}", + "bbox": [ + 125, + 556, + 285, + 573 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Instance: {instance}", + "bbox": [ + 127, + 574, + 274, + 589 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We include an example from Multi-Query GSM-Symbolic in Figure 20, and details on the dataset size in Table C.", + "bbox": [ + 109, + 607, + 883, + 638 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/b7350250b65ae501b1d9d04c80ca8c13f2e3c8cda6b7d2d187c737abd00986d9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | # Questions Total | # Contexts Total | # Original Questions | # Generated Questions
P1 | 12043 | 1095 | 1095 | 10948
P2 | 5497 | 500 | 500 | 4997
", + "bbox": [ + 130, + 650, + 864, + 715 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 1: Dataset Statistics of Multi-Query GSM-Symbolic. We sample one instance from each template from the GSM-Symbolic dataset and separate it into context and question. We then synthetically generate additional questions from the context and question.", + "bbox": [ + 109, + 724, + 883, + 776 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D SWE-Features Details", + "text_level": 1, + "bbox": [ + 112, + 808, + 346, + 825 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "To construct SWE-Features benchmark, we collect pull requests (PRs) from large open-source repositories and apply the following filtering process: (1) We identify all pull requests that modify at least three files with filenames ending in .py or .js. (2) We then use gpt-4o-mini to filter these pull requests based on their title and body, retaining only those that meet the following criteria: (a) the title and body clearly describe the", + "bbox": [ + 109, + 843, + 883, + 909 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 931, + 506, + 944 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Context", + "text_level": 1, + "bbox": [ + 151, + 137, + 215, + 148 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "When Sofia watches her brother, she gets out a variety of toys for him. The bag of building blocks has 33 blocks in it. The bin of stuffed animals has 5 stuffed animals inside. The number of action figures in the action figure pack is twice the number of blocks and stuffed animals combined. The crayon box has 12 different colors of crayon, and the sticker book has 9 pages, each with 13 stickers. The tower of stacking rings has 28 multicolored rings on it. 
Sofia recently bought a tube of bouncy balls, bringing her total number of items for her brother up to 320.", + "bbox": [ + 148, + 151, + 846, + 250 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Original Question", + "text_level": 1, + "bbox": [ + 151, + 251, + 292, + 266 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "How many bouncy balls came in the tube?", + "bbox": [ + 151, + 267, + 462, + 282 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Generated Questions", + "text_level": 1, + "bbox": [ + 151, + 285, + 313, + 299 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- How many action figures does the pack contain?", + "- What is the total number of stickers in the sticker book?", + "- How many total items did Sofia have before adding the tube of bouncy balls?", + "- If Sofia had received a tube with 10 extra bouncy balls, what would be the new total number of items?", + "- What is the sum of the building blocks and stuffed animals?", + "- How many stacking rings are on the tower?", + "- What is the combined total of building blocks, action figures, and stacking rings?", + "- If Sofia gave away 3 stuffed animals, how many stuffed animals would remain in the bin?", + "- What is the sum of the building blocks, stuffed animals, and crayons?", + "- If Sofia divided the 49 bouncy balls equally into 7 baskets, how many balls would each basket contain?" + ], + "bbox": [ + 192, + 315, + 844, + 555 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 20: Examples context and questions from Multi-Query GSM-Symbolic where many questions are asked about the same context. 
The evaluation dataset is generated from GSM-Symbolic.", + "bbox": [ + 111, + 588, + 883, + 621 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "PR; (b) the PR introduces new functionality rather than fixing bugs; and (c) the PR is independent and not obviously linked to other issues.", + "bbox": [ + 111, + 648, + 883, + 680 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "This pipeline results in a benchmark where each example: (1) involves adding a new feature that spans multiple files, requiring a broader understanding of the repository; and (2) is self-contained and solvable without additional issue context. We apply this process to two repositories—Aider-AI/aider and comfyanonymous/ComfyUI—resulting in 18 and 15 PRs respectively, for a total of 33 examples. Representative examples are provided in Appendix G. Then using a total of 33 examples, we employ claude-sonnet-3-7-20250219 to cluster pull requests (PRs) from the ComfyUI and Aider repositories into several groups. This clustering allows us to identify a set of relevant pull requests for each target PR, which can then be provided to the agent as context $(c)$ during repository exploration. For example, in the ComfyUI repository, PR #5293 and PR #931 are grouped into the same cluster. Thus, when processing PR #931, we organize the title, body, and changed_files of PR #5293 to serve as contextual information during sleep-time.", + "bbox": [ + 109, + 689, + 883, + 868 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "When sleep-time compute is enabled, we first supply the content of PR #5293 to the agent, allowing it to explore the repository and summarize its understanding ahead of time. 
In contrast, for the baseline without", + "bbox": [ + 111, + 876, + 883, + 909 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "sleep-time compute, the agent receives the content of PR #5293 only at test time, alongside the title and body of PR #931. The prompts used in these setups are provided in Appendix H.", + "bbox": [ + 111, + 132, + 880, + 165 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For the repository comfyanonymous/ComfyUI, we have the following clustered results:", + "bbox": [ + 112, + 172, + 723, + 188 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{\"Dynamic Typing and Workflow Control\": [5293, 931], \"System Configuration and Command-Line\": [4979, 4690, 3903], \"Cache and Performance Optimization\": [3071, 3042, 723], \"Image Preview and Transfer Features\": [713, 733, 658, 199, 55], \"Internationalization\": [1234], \"Random Seed Management\": [93]}\\n\\n", + "guess_lang": "jsonl", + "bbox": [ + 112, + 195, + 875, + 258 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For the repository Aider-AI/aider we have:", + "bbox": [ + 112, + 277, + 433, + 292 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{\"cluster_1_model_configuration\": [2631, 1998, 468, 667, 55], \"cluster_2_io_handleing\": [1402, 996, 10, 577], \"cluster_3_caching_file_management\": [2911, 2612], \"cluster_4Custom Commands_shortcuts\": [673, 1620, 1015], \"cluster_5_threeParty_integration\": [2866, 2067, 322], \"cluster_6_code_quality_improvements\": [1217, 904]}\\n\\n", + "guess_lang": "txt", + "bbox": [ + 112, + 299, + 874, + 377 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To control the budget during test-time, we fix the total number of steps (controlled by the argument max_chaining_steps in 
Letta framework) to be a certain number. We put the following instructions in the system prompt:", + "bbox": [ + 111, + 395, + 883, + 445 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "You have a strict budget of {max_chaining_steps} steps, which means you need to finish your edits within these steps. Every time you get queried, you will see a count of how many steps you have left in the form of \"[Current Step / Max Steps]\". If you exceed this budget, your response will be cut off. So please be careful and try to finish your edits within the budget.", + "bbox": [ + 124, + 460, + 870, + 527 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "After each step – for example, if the maximum number of steps is 20 and the current step is 4 – we append \"[Step: 4/20]\" to the end of the tool_return message. We found that explicitly indicating the current and total steps significantly improves agent performance, especially in low-budget settings.", + "bbox": [ + 109, + 544, + 883, + 594 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Evaluation. For each PR, we compare the set of files predicted to be modified with the ground truth list of modified files. Specifically, for each pull request, we have the attribute changed_files (as shown in the examples in Appendix G) where each file has the status as either modified or new, and our evaluation is on the files with status modified. Note that the agent is still instructed to implement the required functionality in a Docker environment and write test functions to validate the implementations. 
However, after the agent makes the modifications, we extract the modified files and calculate the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set.", + "bbox": [ + 111, + 609, + 883, + 724 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "E Examples of Predictable and Unpredictable Questions", + "text_level": 1, + "bbox": [ + 112, + 744, + 632, + 763 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Least predictable Stateful GSM-Symbolic P1 question:", + "bbox": [ + 112, + 779, + 504, + 795 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Context: Isabella and Pavel have 199 minutes to walk to grocery store together. It takes them 19 minutes to get to the corner where the library is. It takes them another 11 minutes to get to the park. It will then take double the combined amount they have spent so far to reach the mall.", + "bbox": [ + 125, + 811, + 870, + 859 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question: How much longer do they have to get to grocery store without being late, if they have already wasted 48 minutes to get a coffee before their walk?", + "bbox": [ + 125, + 861, + 870, + 893 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 931, + 506, + 944 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Most predictable Stateful GSM-Symbolic P1 question:", + "bbox": [ + 112, + 132, + 501, + 148 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Context: Yusuf has 10 square yards of grape field. There are 87 grapes per two-thirds a square yard. 
Yusuf can harvest his grapes every 12 months.", + "bbox": [ + 125, + 165, + 870, + 196 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Question: How many grapes can Yusuf harvest in 2 years?", + "bbox": [ + 125, + 199, + 553, + 215 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Least predictable Stateful GSM-Symbolic P2 question:", + "bbox": [ + 112, + 232, + 503, + 250 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Context: Gabriel and Pavel have 212 minutes to walk to the gym together starting from their home. It takes them 29 minutes to get to the corner where the library is. It takes them another 19 minutes to get to the cinema. When they reach the cinema, they remember they forgot their wallets at home, so they have to return to pick up their wallets and then walk all the way back to the cinema again.", + "bbox": [ + 124, + 265, + 870, + 330 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Question: Once they reach the cinema for the second time, how much longer do they have to get to the gym without being late?", + "bbox": [ + 125, + 330, + 869, + 364 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Most predictable Stateful GSM-Symbolic P2 question:", + "bbox": [ + 112, + 381, + 501, + 398 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Context: A juggler can juggle 240 balls. $1/4$ of the balls are tennis balls, and the rest are golf balls. $1/3$ of the tennis balls are black, of which $1/5$ are marked. 
A third of the golf balls are cyan, and all except half of those cyan balls are marked.", + "bbox": [ + 124, + 414, + 870, + 462 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Question: How many marked balls are there in total?", + "bbox": [ + 125, + 463, + 516, + 479 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "F Implementation of rethink_memory and finish_rethinking", + "text_level": 1, + "bbox": [ + 112, + 511, + 661, + 529 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def rethink_memory(agent_state:\"AgentState\",new_memory:str,target_block_label: str, source_block_label: str) -> None:#type: ignore Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. Ensure consistency with other memory blocks.. \nArgs: new_memory(str):The new memory with information integrated from the memory block.If there is no new information, then this should be the same as the content in the source block. source_block_label(str): The name of the block to integrate information from. None if all the information has been integrated to terminate the loop. target_block_label(str):The name of the block to write to. Returns: None: None is always returned as this function does not produce a response. 
1if target_block_label is not None: if agent_state-memory.get_block(target_block_label) is None: agent_state-memory.create_block.label $\\equiv$ target_block_label, value $\\equiv$ new_memory", + "guess_lang": "txt", + "bbox": [ + 114, + 560, + 875, + 909 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Listing 1: Reference implementation of rethink_memory" + ], + "code_body": "agent_state.memory.update_block_value.label=target_block_label, value=new_memory) \nreturn None", + "guess_lang": "txt", + "bbox": [ + 147, + 133, + 803, + 176 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Listing 2: Reference implementation of finish_rethinking_memory" + ], + "code_body": "def finish_rethinking_memory(agent_state: \"AgentState\") -> None: # type: ignore\n\t\" \"\n\tThis function is called when the agent is done rethinking the memory.\n\tReturns:\n\t\tOption[str]: None is always returned as this function does not produce a response.\n\t\t\"\"\"\n\t\treturn None", + "guess_lang": "python", + "bbox": [ + 114, + 239, + 839, + 376 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "G SWE-Features Examples", + "text_level": 1, + "bbox": [ + 112, + 438, + 370, + 458 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Each example in SWE-Features has the following attributes: ['repo', 'pr_number', 'title', 'user_login', 'state', 'body', 'changed_files_count', 'changed_files', 'base_commit']. We show some examples here to better deliver a sense of what this dataset looks like:", + "bbox": [ + 109, + 473, + 885, + 521 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "repo: ComfyUI \npr_number: 3903 \ntitle: Add --disable-all-custom-nodes` cmd flag \nbody: Loading custom node can greatly slow startup time. 
During development/testing of ComfyUI, it is often better to use an environment that no custom node is loaded.\\n\\nThis PR adds a --no-custom-node` flag to allow users/developers skip loading of custom node without removing/renaming the custom_node directory. \nuser_login: huchenlei \nstate: closed \nchanged_files_count: 4 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: 521421f53ee1ba74304dfaa138b0f851093e1595 \nrepo: ComfyUI \npr_number: 3071 \ntitle: Add a configured node output cache metaclass. \nbody: Implement a configurable node output cache metaclass to reduce unnecessary node executions.\\n\\nThe same model currently leads to reloading due to different node IDs between workflows. Loading the model from disk takes a long time. \nstate: closed \nchanged_files_count: 6 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: cacb022c4a5b9614f96086a866c8a4c4e9e85760", + "guess_lang": "yaml", + "bbox": [ + 112, + 531, + 883, + 893 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "repo: ComfyUI \npr_number: 3042 \ntitle: NaN-safe JSON serialization \nbody: Python's json.dumps() will produce nonstandard JSON if there are NaNs in the prompt data. Javascript's JSON.parse() will refuse to load this kind of \"JSON\" so the prompt won't load in the frontend.\\n\\nThis happened to me with a ComfyBox workflow, so I'm not $100\\%$ \nuser_login: asagi4 \nstate: open \nchanged_files_count: 4 \nchanged_files: ... 
(omitted here for brevity) \nbase_commit: 448d9263a258062344e25135fc49d26a7e60887a", + "guess_lang": "txt", + "bbox": [ + 112, + 148, + 864, + 329 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "repo: aider \npr_number: 55 \ntitle: Local llama support \nbody: Added support for using a locally running instance of a LLAMA model instead of OpenAI apis. \\n\\nAIDER_MODEL_TOKENS - used to specify the context length the model will use. \\n2. AIDER_TOKENIZER - used to specify which tokenizer should be used. Currently only 'openai' and 'llama' are supported. Defaults to openai. \\n\\nValues set.\\n\\nAIDER_OPENAI_API_BASE=\\protect\\vrule width0pt\\protect|href{http://127.0.0.1:5001/v1}{http://127.0.0.1:5001/v1} \\nAIDER_MODEL=TheBloke_wizard-vicuna-13B-SuperHOT-8K-GGML \\n\\nuser_login: bytedisciple \nstate: closed \nchanged_files_count: 7 \nchanged_files: ... (omitted here for brevity) \nbase_commit: cdf8f9a4b2b4a65993227ac5af1eaf3f1b85c9d8", + "guess_lang": "yaml", + "bbox": [ + 112, + 359, + 869, + 616 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "repo: aider \npr_number: 322 \nuser_login: omri123 \nstate: closed \ntitle: RFC - Allow adding a github issue to chat context \nbody: Hi, would you like to take a look on this feature? \\n\\nIn the first commit I changedCoder to allow adding arbitrary additional context in the beginning of the chat. \\nIn the second commit I used this infra to add github issues to the chat. \\nI didn't add a new command, instead I extended /add to allow /add \\issue-3\\.\\nThe feature is disabled by default and enabled with a flag. If enabled, the user need to supply github repository name and authentication token. \\nThanks \\nOmri changed_files_count: 7 \nchanged_files: ... 
(ommitted here for brevity) \nbase_commit: af71638b06be7e934cdd6f4265f9e0c8425d4e6d", + "guess_lang": "txt", + "bbox": [ + 112, + 647, + 870, + 857 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "repo: aider", + "guess_lang": "txt", + "bbox": [ + 114, + 888, + 212, + 902 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "pr_number: 577 \ntitle: Adding a simple browser based GUI \nbody: Run aider with `--browser` to launch the UI. \nuser_login: paul-gauthier \nstate: closed \nchanged_files_count: 12 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: 8a9005eed19417c59aa9432436ea8cb5e04bbb11", + "guess_lang": "txt", + "bbox": [ + 112, + 133, + 581, + 253 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Listing 3: Examples of SWE-Features. Here we randomly select 3 examples for each repo and present their attributes.", + "bbox": [ + 111, + 263, + 883, + 296 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H Prompts for SWE-Features", + "text_level": 1, + "bbox": [ + 112, + 339, + 390, + 357 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "When the sleep-time compute is turned off, the prompt is as below:", + "bbox": [ + 111, + 376, + 599, + 393 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "bbox": [ + 127, + 412, + 250, + 429 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "working_dir", + "bbox": [ + 127, + 431, + 220, + 445 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 446, + 250, + 462 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "I've uploaded a python code repository in the directory working_dir. 
Consider the following PR description:", + "bbox": [ + 127, + 463, + 869, + 494 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": " problem_statement ", + "bbox": [ + 129, + 496, + 509, + 512 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?", + "bbox": [ + 127, + 512, + 867, + 542 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Your task is to make the minimal changes to the repository to ensure the jpr_description $\\zeta$ is satisfied.", + "bbox": [ + 127, + 545, + 852, + 560 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Follow these steps to resolve the issue:", + "bbox": [ + 127, + 561, + 408, + 575 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. As a first step, it might be a good idea to find and read code relevant to the ", + "2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary.", + "3. After finish the changes, revise the plan if needed.", + "4. With the new plan, make more changes, and continue the loop until necessary changes are made.", + "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes.", + "6. Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." 
+ ], + "bbox": [ + 127, + 577, + 869, + 739 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The following are several pull request descriptions and their corresponding model patches:", + "bbox": [ + 127, + 741, + 785, + 757 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Title: pr_title", + "bbox": [ + 127, + 758, + 222, + 773 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Body: pr_body", + "bbox": [ + 129, + 773, + 236, + 789 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "File: file1Filename", + "bbox": [ + 129, + 790, + 264, + 804 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Status: file1.status", + "bbox": [ + 129, + 806, + 263, + 820 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Patch: file1.patch", + "bbox": [ + 129, + 823, + 256, + 838 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "... (some more files and some more relevant pull requests)", + "bbox": [ + 129, + 839, + 545, + 854 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "When the sleep-time compute is turned on, we first use the following prompt to ask the agent to explore the repository with all pull requests one by one:", + "bbox": [ + 111, + 876, + 883, + 909 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The following is a pull request description and its corresponding model patches:", + "bbox": [ + 125, + 138, + 709, + 156 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Title: pr_title", + "bbox": [ + 127, + 157, + 222, + 172 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Body: pr_body", + "bbox": [ + 129, + 174, + 236, + 189 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "File: file1Filename", + "bbox": [ + 129, + 190, + 264, + 203 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Status: file1.status", + "bbox": [ + 129, + 
205, + 263, + 219 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Patch: file1.patch", + "bbox": [ + 129, + 222, + 258, + 238 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Please read through the above information and try to understand the issue. You can explore the repo if needed. Summarize your understanding from the following perspectives:", + "bbox": [ + 127, + 239, + 869, + 270 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The issue description.", + "2. The changed files.", + "3. How do these changed files work." + ], + "bbox": [ + 127, + 271, + 392, + 319 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "After exploring the repository with all relevant pull requests, we give the agent the following prompt as the final prompt to start working on the issue at test time:", + "bbox": [ + 111, + 345, + 883, + 378 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "bbox": [ + 127, + 402, + 246, + 419 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "working_dir", + "bbox": [ + 129, + 421, + 220, + 435 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 436, + 246, + 452 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "I've uploaded a python code repository in the directory working_dir. 
Consider the following PR description:", + "bbox": [ + 127, + 453, + 869, + 484 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": " problem_statement ", + "bbox": [ + 129, + 486, + 509, + 502 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?", + "bbox": [ + 127, + 503, + 867, + 534 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Your task is to make the minimal changes to the repository to ensure the ipr_description $\\zeta$ is satisfied.", + "bbox": [ + 127, + 535, + 851, + 550 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Follow these steps to resolve the issue:", + "bbox": [ + 127, + 551, + 408, + 566 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. As a first step, it might be a good idea to find and read code relevant to the ", + "2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary.", + "3. After finish the changes, revise the plan if needed.", + "4. With the new plan, make more changes, and continue the loop until necessary changes are made.", + "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes.", + "6. Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." 
+ ], + "bbox": [ + 127, + 568, + 869, + 729 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "I Context-Only Baseline", + "text_level": 1, + "bbox": [ + 112, + 771, + 346, + 789 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To check that the questions in Stateful AIME and Stateful GSM-Symbolic are not trivially guessable, we compare sleep-time compute against a context-only baseline, which only provides the model with $c$ , expecting the LLM to guess the most likely question and output the answer to whatever that question might be. We see on both Stateful AIME in Figure 22 and Stateful GSM-Symbolic in Figure 21 that sleep-time compute significantly outperforms the context-only baseline, demonstrating that the questions in our datasets are not trivially predictable from the context.", + "bbox": [ + 109, + 810, + 883, + 909 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg", + "image_caption": [], + "image_footnote": [ + "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + ], + "bbox": [ + 117, + 130, + 496, + 349 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 132, + 879, + 349 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg", + "image_caption": [ + "Figure 21: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful GSM-Symbolic, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). 
We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful GSM-Symbolic cannot be trivially guessed." + ], + "image_footnote": [ + "sleep-time compute ablate question" + ], + "bbox": [ + 117, + 479, + 493, + 686 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg", + "image_caption": [ + "Figure 22: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful AIME, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful AIME cannot be trivially guessed." + ], + "image_footnote": [], + "bbox": [ + 498, + 479, + 879, + 686 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "J Stateful AIME Construction", + "text_level": 1, + "bbox": [ + 109, + 823, + 395, + 839 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "To construct the examples for Stateful AIME, we split each AIME 2024 and 2025 into a sequence of \"statements\", which correspond to punctuation separated stentences in the problem. Similar to how we construct Stateful GSM-Symbolic, we use all but the last statement as the context, and the final statement as the query.", + "bbox": [ + 109, + 859, + 883, + 910 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "There are a couple of edge cases where the question is posed in e.g. the second to last statement rather than the last statement. In these cases, we manually rearrange the statements to ensure the query being used corresponds to the question. In a few cases, there is only one statement in the problem. 
In these cases, the context is empty.", + "bbox": [ + 109, + 132, + 883, + 200 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "AIME includes a latex representation of figures. However, these latex figures can leak information about the answer: for example, these latex figures can contain exact information about the lengths of the sides in a geometry problem, giving away the answer. In these cases we first ensure that the problem is solvable without the figure and then manually strip the figure latex from the problem context.", + "bbox": [ + 109, + 205, + 883, + 272 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "K Implementation Details", + "text_level": 1, + "bbox": [ + 112, + 292, + 367, + 311 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We implement sleep-time compute via function calling. When applying sleep-time compute, the model is given access to two functions, rethink_memory and finish_rethinking. The rethink_memory function takes as input a new string, and replaces the current context $c$ and replaces the current context with the new string. The finish_rethinking function terminates the sleep-time compute process. 
The model is allowed to call the function rethink_memory for up to 10 times.", + "bbox": [ + 109, + 325, + 883, + 409 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "L AIME main results by year", + "text_level": 1, + "bbox": [ + 112, + 430, + 390, + 449 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "M AIME sleep-time compute scaling results by year", + "bbox": [ + 112, + 465, + 596, + 484 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 290, + 493, + 489 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 290, + 879, + 489 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg", + "image_caption": [ + "Figure 23: AIME 2024 main result" + ], + "image_footnote": [ + "sleep-time compute test-time compute only" + ], + "bbox": [ + 117, + 491, + 493, + 686 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 491, + 870, + 686 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 931, + 508, + 944 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 120, + 290, + 496, + 488 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": 
"images/022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 290, + 879, + 488 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg", + "image_caption": [ + "Claude 3.7 Sonnet - Stateful-AIME 2025", + "Figure 24: AIME 2025 main result" + ], + "image_footnote": [ + "sleep-time compute test-time compute only" + ], + "bbox": [ + 117, + 500, + 496, + 686 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg", + "image_caption": [ + "DeepSeek R1 - Stateful-AIME 2025" + ], + "image_footnote": [], + "bbox": [ + 501, + 500, + 877, + 686 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 931, + 509, + 944 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg", + "image_caption": [ + "o1 Sleep-Time Compute Stateful-AIME 2024" + ], + "image_footnote": [ + "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + ], + "bbox": [ + 117, + 176, + 496, + 424 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg", + "image_caption": [ + "o3-mini Sleep-Time Compute Stateful-AIME 2024", + "Figure 25: Scaling sleep-time compute for Stateful AIME2024." 
+ ], + "image_footnote": [], + "bbox": [ + 500, + 176, + 877, + 425 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg", + "image_caption": [ + "o1 Sleep-Time Compute Stateful-AIME 2025", + "Figure 26: Scaling sleep-time compute on Stateful AIME2025" + ], + "image_footnote": [ + "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + ], + "bbox": [ + 117, + 563, + 501, + 811 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg", + "image_caption": [ + "o3-mini Sleep-Time Compute Stateful-AIME 2025" + ], + "image_footnote": [], + "bbox": [ + 506, + 564, + 875, + 811 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 931, + 506, + 944 + ], + "page_idx": 30 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_model.json b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8a123234efc1889f00f72b16a578ca837dbbe8a7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_model.json @@ -0,0 +1,4167 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.075, + 0.141, + 0.096 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.148, + 0.077, + 0.197, + 0.094 + ], + "angle": 0, + "content": "Letta" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.12, + 0.759, + 0.143 + ], + "angle": 0, + "content": "Sleep-time Compute: Beyond Inference Scaling at Test-time" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.166, + 0.341, + 0.183 + ], + "angle": 0, + "content": "Kevin Lin \\(^{1*}\\) Charlie Snell \\(^{2*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.183, + 0.786, + 0.201 + 
], + "angle": 0, + "content": "Yu Wang \\(^{1}\\) Charles Packer \\(^{1}\\) Sarah Wooders \\(^{1}\\) Ion Stoica \\(^{1,2}\\) Joseph E. Gonzalez \\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.205, + 0.434, + 0.224 + ], + "angle": 0, + "content": "\\(^{1}\\)Letta \\(^{2}\\)University of California, Berkeley" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.228, + 0.268, + 0.242 + ], + "angle": 0, + "content": "research@letta.com" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.281, + 0.542, + 0.297 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.309, + 0.888, + 0.555 + ], + "angle": 0, + "content": "Scaling test-time compute has emerged as a key ingredient for enabling large language models (LLMs) to solve difficult problems, but comes with high latency and inference cost. We introduce sleep-time compute, which allows models to \"think\" offline about contexts before queries are presented: by anticipating what queries users might ask and pre-computing useful quantities, we can significantly reduce the compute requirements at test-time. To demonstrate the efficacy of our method, we create modified versions of two reasoning tasks – Stateful GSM-Symbolic and Stateful AIME. We find that sleep-time compute can reduce the amount of test-time compute needed to achieve the same accuracy by \\(\\sim 5\\times\\) on Stateful GSM-Symbolic and Stateful AIME and that by scaling sleep-time compute we can further increase accuracy by up to \\(13\\%\\) on Stateful GSM-Symbolic and \\(18\\%\\) on Stateful AIME. Furthermore, we introduce Multi-Query GSM-Symbolic, which extends GSM-Symbolic by including multiple related queries per context. By amortizing sleep-time compute across related queries about the same context using Multi-Query GSM-Symbolic, we can decrease the average cost per query by \\(2.5\\times\\). 
We then conduct additional analysis to understand when sleep-time compute is most effective, finding the predictability of the user query to be well correlated with the efficacy of sleep-time compute. Finally, we conduct a case-study of applying sleep-time compute to a realistic agentic SWE task. Code and data released at: https://github.com/letta-ai/sleep-time-compute." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.576, + 0.262, + 0.592 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.609, + 0.886, + 0.756 + ], + "angle": 0, + "content": "Test-time scaling has emerged as an effective way to boost LLM performance on challenging tasks by spending more time thinking on difficult problems (OpenAI, 2024; DeepSeek-AI, 2024; Snell et al., 2024; Brown et al., 2024). However, improved performance from test-time compute comes at a significant increase in latency and cost, waiting potentially several minutes for answers and costing up to tens of dollars per query. These drawbacks are in part due to the fact that the current approach to applying test-time compute assumes that problems are stateless, i.e. queries (user queries at test-time) and the contexts (background information) required for answering them are provided to the model together at \"test-time.\" In practice, this means that if multiple related queries require making similar inferences about the context at \"test-time,\" the model will have to recompute redundant computations each time, incurring additional latency and cost." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.764, + 0.885, + 0.863 + ], + "angle": 0, + "content": "In reality, many LLM applications are inherently stateful, and work in conjunction with persisted, re-used context. A classic example is document question-answering, where documents contextualize responses to questions. 
Coding agents also operate on a large common repository and participate in multiple rounds of debugging support, while conversational assistants need to maintain the past dialogue. In all these applications, there is context (available documents, a codebase, or conversation history) that is already available before the next user input." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.87, + 0.483, + 0.887 + ], + "angle": 0, + "content": "1https://platform.openai.com/docs/models/o1-pro" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.13171v1 [cs.AI] 17 Apr 2025" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.139, + 0.877, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.522, + 0.884, + 0.573 + ], + "angle": 0, + "content": "Figure 1: Example of applying sleep-time compute on Multi-Query GSM-Symbolic-P1. Sleep-time compute processes the original raw context, adding additional computations that can potentially be useful for future queries. Moreover, contexts can be shared across related queries enabling savings in total cost per query." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.596, + 0.885, + 0.777 + ], + "angle": 0, + "content": "In these settings, we could in principle, make useful inferences about the current state (context) offline before, or even during the user's next input. We refer to such a process, as sleep-time compute: where inference is done between interactions with the model while it would otherwise be idle in sleep-time. In practice, this is achieved by prompting the model to generate a new context consisting of inferences about the existing context, which may be potentially useful for answering test-time queries. 
The re-represented context from sleep-time can then be provided in the prompt at test-time, enabling the model to respond to user queries at the accuracy of standard test-time compute but with far lower latencies. For example, a coding assistant at sleep-time may identify architectural patterns, anticipate potential debugging strategies, or infer optimizations prior to the user input. Moreover, users might ask multiple queries about the same context. In these settings, any inferences made during sleep-time can be shared across queries, effectively amortizing the cost of sleep-time compute and reducing the total average cost per query." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.783, + 0.884, + 0.849 + ], + "angle": 0, + "content": "To evaluate sleep-time compute, we modify two mathematical reasoning datasets to introduce two datasets – Stateful GSM-Symbolic and Stateful AIME – by splitting the existing problems in these datasets into a context and a question. Using these datasets, we aim to empirically understand the benefits of sleep-time compute on standard test-time compute benchmarks. We show that:" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.861, + 0.885, + 0.91 + ], + "angle": 0, + "content": "- Sleep-time compute produces a pareto improvement in the test-time compute vs. accuracy curve, reducing the test-time compute needed to achieve the same accuracy by \\(\\sim 5\\times\\) on Stateful GSM-Symbolic and Stateful AIME." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.933, + 0.506, + 0.945 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.155, + 0.133, + 0.88, + 0.163 + ], + "angle": 0, + "content": "- By scaling up sleep-time compute, we see further pareto improvements, shifting the accuracy up by \\(13\\%\\) on Stateful GSM-Symbolic and \\(18\\%\\) on Stateful AIME." 
+ }, + { + "type": "text", + "bbox": [ + 0.155, + 0.166, + 0.88, + 0.197 + ], + "angle": 0, + "content": "- By amortizing sleep-time compute across multiple queries for the same context, we can reduce the average cost per question by \\(2.5 \\times\\)." + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.199, + 0.88, + 0.244 + ], + "angle": 0, + "content": "- We conduct analysis to understand which queries benefit the most from sleep-time compute, finding that sleep-time compute is more effective in settings where the query is more easily predictable from the context." + }, + { + "type": "list", + "bbox": [ + 0.155, + 0.133, + 0.88, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.261, + 0.884, + 0.294 + ], + "angle": 0, + "content": "Finally, we end with case study of applying sleep-time compute to reduce test-time compute in a realistic agentic software engineering task." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.318, + 0.27, + 0.334 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.354, + 0.885, + 0.501 + ], + "angle": 0, + "content": "Scaling test-time compute. Our work builds on recent progress on scaling up computation at test-time for difficult reasoning problems (Snell et al., 2024; DeepSeek-AI, 2024; OpenAI, 2024). Two predominant approaches to test-time scaling have emerged: sequential test-time scaling (OpenAI, 2024; DeepSeek-AI, 2024; Muennighoff et al., 2025; Snell et al., 2024) and parallel test-time scaling (Brown et al., 2024; Snell et al., 2024). While sequential test-time scaling has demonstrated impressive performance improvements, parallel test-time scaling has the advantage of scaling test-time compute without increasing latency. In constraint, we propose an alternative dimension where existing advancements in test-time compute, both sequential and parallel can be applied. 
Namely, instead of performing inference purely at test-time, we leverage compute on contexts that are available before the actual query arrives." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.52, + 0.884, + 0.618 + ], + "angle": 0, + "content": "Speculative decoding in LLMs. Speculative decoding is a standard technique for reducing latency in decoding with LLMs (Leviathan et al., 2023; Stern et al., 2018; Cai et al., 2024; DeepSeek-AI et al., 2025). Sleep-time compute similarly targets reducing reasoning latency by speculating on the user's query as well as any potentially helpful reasoning over the context. However, unlike speculative decoding, the generated tokens are used as an input regardless of the user's actual query, and at test-time the reasoning model uses these generated tokens to help answer the user query more efficiently." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.637, + 0.884, + 0.75 + ], + "angle": 0, + "content": "Pre-computation. Beyond LLMs, a long history of work has explored the trade-off between pre-computation and memory (eg. memory caches Smith (1982) and data cubes for OLAP workloads Gray et al. (1997)). Our work explores the same trade-off between query latency and pre-computation overhead, operating under the assumption that query workload patterns can be reasonably anticipated in advance. sleep-time compute builds on the idea of pre-fetching in traditional operating systems, in the context of LLMs à la Packer et al. (2023), storing frequently used computational results to avoid higher latency at test-time." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.776, + 0.334, + 0.794 + ], + "angle": 0, + "content": "3 Sleep-time Compute" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.812, + 0.884, + 0.909 + ], + "angle": 0, + "content": "In the standard paradigm of applying test-time compute, a user inputs a prompt \\( p \\) to the LLM and then the LLM applies test-time compute to help answer the user's question. 
However, the \\( p \\) provided to the LLM can oftentimes be decomposed into a pre-existing context \\( c \\) (eg. a codebase) and a user query \\( q \\) (eg. a question about the codebase). When the LLM is not actively responding to the user, it typically still has access to the existing context \\( c \\). During this time, the LLM is typically idling, missing the opportunity to reason about \\( c \\) offline: a process we term sleep-time compute." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.933, + 0.506, + 0.945 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.133, + 0.888, + 0.332 + ], + "angle": 0, + "content": "Test-time compute. In the test-time compute setting, the user provides \\( q \\) along with some context \\( c \\) and the model outputs a reasoning trace followed by a final answer \\( a \\). We denote this process, as: \\( T_{B}(q,c) \\to a \\), where \\( T \\) is the method for using test-time compute with budget \\( B \\), which could include techniques like extended chains of thought or best-of-N. In practice, the user may have multiple queries about the same context \\( q_{1}, q_{2} \\ldots q_{N} \\). In this setting, the model will carry out independent reasoning processes for each \\( q_{i} \\), even if they are related to the same context \\( c \\). Ideally, we would be able to reuse related inferences across each \\( q_{i} \\) to save compute. Moreover, in many cases, \\( c \\) is complex and may require carrying out significant processing/inferences in order to provide an answer to \\( q \\). Since, the test-time compute paradigm of \\( T(q,c) \\to a \\) assumes that \\( c \\) is only available at the same time as \\( q \\), standard test-time compute carries out all of these inferences only after the user provides the query, causing the user to wait up to several minutes for a response. 
However, in practice we often have access to \\( c \\) before \\( q \\) and can carry out much of this processing ahead of time." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.347, + 0.889, + 0.511 + ], + "angle": 0, + "content": "Sleep-time compute. During sleep-time we are given the context \\( c \\) but not the query \\( q \\). Using just this context \\( c \\), we can use the LLM to infer likely questions and reason about the context ultimately producing a more new re-represented context \\( c' \\). We denote this process as: \\( S(c) \\to c' \\), where \\( S \\) can be any standard test-time scaling technique applied towards pre-processing the context at sleep-time. In this work, \\( S(c) \\) is implemented by prompting the model to draw inferences and re-write \\( c \\) in a way that might be useful at test-time (see Appendix K for more details). After pre-processing the context, we can provide the new context \\( c' \\) at test-time in place of \\( c \\) to produce a final answer to the user's query: \\( T_b(q, c') \\to a \\). Since much of the reasoning about \\( c \\) has been done ahead of time in this case, we can use a much smaller test-time budget \\( b < < B \\). Moreover, \\( c' \\) can be shared across different queries \\( q_i \\) about the same context, effectively amortizing the compute required to arrive at \\( c' \\) across queries, providing a total cost saving." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.532, + 0.325, + 0.55 + ], + "angle": 0, + "content": "4 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.567, + 0.791, + 0.584 + ], + "angle": 0, + "content": "Next, we describe the datasets, models, and baselines we use to evaluate sleep-time compute." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.602, + 0.22, + 0.616 + ], + "angle": 0, + "content": "4.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.63, + 0.886, + 0.681 + ], + "angle": 0, + "content": "We select datasets which represent standard benchmarks for LLM reasoning and test-time scaling, and which demonstrate improvements from scaling test-time compute with state-of-the-art LLMs (either reasoning or non-reasoning)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.697, + 0.888, + 0.81 + ], + "angle": 0, + "content": "Stateful datasets. We introduce two datasets to study applying sleep-time compute in stateful settings, Stateful GSM-Symbolic, and Stateful AIME, where each dataset is derived from splitting the existing datasets into a context and a question (see Figure 2 for an example). Stateful GSM-Symbolic is derived from the P1 and P2 splits of GSM-Symbolic (Mirzadeh et al., 2024), which add one and two clauses respectively to the original GSM8K dataset (Cobbe et al., 2021) to that increase the difficulty. GSM-Symbolic P1 contains 5000 examples and P2 2500 examples. Stateful AIME contains 60 questions combined from AIME 2024 and 2025. In Appendix L and M, we show the breakdown of our results across AIME 2024 and 2025." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.828, + 0.886, + 0.911 + ], + "angle": 0, + "content": "Amortization dataset. To study the effect of related questions that share context, we introduce a new dataset Multi-Query GSM-Symbolic, where each context has multiple queries. To generate multiple queries for a given context, we take Stateful GSM-Symbolic and use o3-mini to generate additional question answer pairs. We synthetically generate additional questions from existing context question pairs in GSM-Symbolic. Appendix C shows the prompt used to generate the additional questions. 
Figure 20 shows examples contexts" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.933, + 0.506, + 0.945 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.139, + 0.87, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.39, + 0.884, + 0.424 + ], + "angle": 0, + "content": "Figure 2: Example of separating an instance from GSM-Symbolic into context, and question, creating an instance in Stateful GSM-Symbolic." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.45, + 0.884, + 0.481 + ], + "angle": 0, + "content": "and set of questions from the Multi-Query GSM-Symbolic dataset and Table C shows the overall dataset statistics." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.502, + 0.318, + 0.516 + ], + "angle": 0, + "content": "4.2 Models and Baselines" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.531, + 0.885, + 0.613 + ], + "angle": 0, + "content": "Models. On each dataset, we evaluate models which have poor performance when using a small amount of test-time compute, but yield improvements from scaling up test-time compute. Therefore, on GSM-Symbolic, we conduct experiments using GPT-4o-mini and GPT-4o, and on AIME, we conduct experiments using OpenAI's o1, o3-mini, Anthropic's Claude Sonnet 3.7 Extended Thinking , and Deepseek-R1 (DeepSeek-AI, 2024). \\( {}^{2}{}^{3} \\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.631, + 0.884, + 0.713 + ], + "angle": 0, + "content": "Baselines The main baseline we consider is the standard test-time compute setting in which both \\( c \\) and \\( q \\) are presented to the model for the first time at test-time. 
Furthermore, to validate that \\( q \\) is not trivially predictable from \\( c \\) on our Stateful GSM-Symbolic and Stateful AIME datasets, we also compare to a context-only baseline in Appendix I, in which the model is only given \\( c \\) and is tasked with directly guessing an answer to the question it guesses is most likely to come next." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.736, + 0.37, + 0.755 + ], + "angle": 0, + "content": "5 Experiments and Results" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.772, + 0.884, + 0.819 + ], + "angle": 0, + "content": "In this section, we carry out experiments to understand the benefits of sleep-time compute. Specifically, we would like to answer each of the following questions using the math reasoning benchmarks introduced above:" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.835, + 0.774, + 0.851 + ], + "angle": 0, + "content": "1. Can sleep-time compute shift the pareto frontier of test-time compute vs. accuracy?" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.852, + 0.672, + 0.868 + ], + "angle": 0, + "content": "2. Does scaling sleep-time compute in-turn improve the pareto further?" 
+ }, + { + "type": "list", + "bbox": [ + 0.15, + 0.835, + 0.774, + 0.868 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.879, + 0.308, + 0.895 + ], + "angle": 0, + "content": "2https://openai.com/o1/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.895, + 0.436, + 0.91 + ], + "angle": 0, + "content": "3https://www.anthropic.com/claudi/sonnet" + }, + { + "type": "list", + "bbox": [ + 0.133, + 0.879, + 0.436, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.932, + 0.505, + 0.944 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.131, + 0.5, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.133, + 0.88, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.361, + 0.356, + 0.682, + 0.377 + ], + "angle": 0, + "content": "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.398, + 0.884, + 0.431 + ], + "angle": 0, + "content": "Figure 3: The test-time compute vs. accuracy tradeoff for on Stateful GSM-Symbolic. Shaded area indicates where sleep-time compute improves the pareto test-time accuracy trade-off." + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.457, + 0.884, + 0.489 + ], + "angle": 0, + "content": "3. When there are multiple related questions for a single context, can amortizing test-time compute with sleep-time compute provide a total token efficiency benefit?" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.49, + 0.655, + 0.507 + ], + "angle": 0, + "content": "4. In what settings does sleep-time compute provide the most uplift?" 
+ }, + { + "type": "list", + "bbox": [ + 0.15, + 0.457, + 0.884, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.527, + 0.625, + 0.544 + ], + "angle": 0, + "content": "5.1 Improving Pareto Test-Time Trade-off with sleep-time compute" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.557, + 0.884, + 0.59 + ], + "angle": 0, + "content": "We first determine the test-time compute, accuracy pareto frontier by scaling standard test-time compute sequentially and in parallel. We then study how applying sleep-time compute affects the pareto trade-off." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.608, + 0.885, + 0.789 + ], + "angle": 0, + "content": "Scaling test-time-compute sequentially. For non-reasoning models (GPT-4o and 4o-mini) on Stateful GSM-Symbolic, to vary the amount of test-time compute, we construct prompts that instruct the model to use different amounts of vocabulary at test time, eg. \"answer directly with a single sentence\" vs. \"double check your reasoning before outputting the final answer.\" The full prompts are in Appendix A. We use temperature 0 for generation. We see in Figure 3 that there is a tradeoff between accuracy and the amount of test-time compute, and that adding sleep-time compute can move beyond the pareto compute-accuracy curve. In particular, at lower test-time budgets, the performance of sleep-time compute is significantly better than the baseline, achieving performance comparable to that of the baseline with \\(5 \\times\\) less test-time tokens. However, at the test-tome compute budgets, the test-time compute only baseline slightly outperforms sleep-time compute. We hypothesize that this may be because the standard test-time compute only has the content relevant to the specific question, so there is less distracting information in the prompt." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.795, + 0.884, + 0.91 + ], + "angle": 0, + "content": "For reasoning models on Stateful AIME, we scale the amount of test-time compute based on what is available in the API in the case of o1, o3-mini and Claude Sonnet 3.7. Since the Deepseek-R1 API does not provide a way to control test-time compute, we apply the \"budget forcing\" and extension prompt from Muennighoff et al. (2025). Figure 4 shows the results for each model on Stateful AIME. We average results over 3 runs for o1, o3-mini and R1. For Claude 3.7 Sonnet, we average over 10 runs as we observed more noise in initial experiments. On all models, we see a significant test-time, accuracy pareto shift from applying sleep-time compute, with the exception of o1, which demonstrates limited gains." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.933, + 0.505, + 0.944 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.131, + 0.5, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.131, + 0.882, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.353, + 0.5, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.354, + 0.88, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.592, + 0.885, + 0.642 + ], + "angle": 0, + "content": "Figure 4: The test-time compute vs. accuracy tradeoff on Stateful AIME for various reasoning models. Applying sleep-time compute allows models to reach similar levels of performance with much less compute at test-time. The shaded area indicates the pareto improvement from sleep-time compute." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.73, + 0.885, + 0.91 + ], + "angle": 0, + "content": "Scaling test-time compute in parallel. 
An alternative approach to scaling test-time compute is via parallel sampling, which also has the benefit of maintaining low inference latency. The simplest approach to scaling parallel test-time compute is pass@k (Brown et al., 2024), which makes the unrealistic assumption of having oracle query access to a ground truth verifier at test-time, an assumption which we do not make with sleep-time compute. Therefore, outperforming the pass@k baseline would represent a meaningful improvement over parallel test-time scaling. We apply parallel scaling to the lowest sequential compute setting on each task, since scaling pass@k with higher sequential compute settings would quickly reach token budgets that exceed that of sleep-time compute in the maximum sequential setting. We see that across all tasks and models, sleep-time compute consistently outperforms pass@k parallel scaling at the same test-time token budget, demonstrating that sleep-time compute can be a more effective way to scale inference-time compute than standard parallel test-time scaling." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.932, + 0.506, + 0.944 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.131, + 0.498, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.133, + 0.88, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.364, + 0.356, + 0.68, + 0.377 + ], + "angle": 0, + "content": "--- gpt-4o-mini -gpt-4o \n--- gpt-4o-mini + background scaling -gpt-4o + background scaling" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.398, + 0.884, + 0.431 + ], + "angle": 0, + "content": "Figure 5: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful GSM-Symbolic. We see that sleep-time compute generally pareto dominates pass@k." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.457, + 0.385, + 0.473 + ], + "angle": 0, + "content": "5.2 Scaling up sleep-time compute" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.486, + 0.884, + 0.584 + ], + "angle": 0, + "content": "We would like to understand how scaling compute during sleep-time can further effect the pareto shift that we observed in Section 5.1. To scale up the amount of sleep-time compute, for non-reasoning models, we run \\( k \\) parallel generations, given input \\( c \\), resulting in \\( c_{1},\\ldots ,c_{k} \\). At test-time, the model then receives the inputs concatenated \\( c_{1},\\ldots ,c_{k} \\) to generate the final answer. On reasoning models, we scale up the amount of sleep-time compute by varying the reasoning effort for o1 and for o3-mini when applying the sleep-time compute prompt. At test-time, we vary the amount of compute in the same way as 5.1." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.592, + 0.885, + 0.724 + ], + "angle": 0, + "content": "In Figure 7, we see that further scaling sleep-time compute on Stateful GSM-Symbolic shifts the pareto curve outwards, improving performance by up to \\(13\\%\\) at a similar test-time budget. In particular, we see the largest gains on more difficult tasks with stronger models (eg. on P2 with 'gpt-4o'), suggesting that on tasks with more complicated contexts additional sleep-time compute can be beneficial. However, in this setting, there seems to be a limit to the number of parallel agents that can improve performance, as we find that 5 parallel generations generally outperforms 10. In Figure 26, we scale up sleep-time compute on Stateful AIME. Similarly, we also see that scaling compute at sleep-time generally shifts the pareto curve outward, improving performance by up to \\(18\\%\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.743, + 0.651, + 0.759 + ], + "angle": 0, + "content": "5.3 Amortizing sleep-time compute across queries with shared context" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.772, + 0.885, + 0.87 + ], + "angle": 0, + "content": "We want to understand how the total cost of inference can be improved by applying sleep-time compute in settings where each context has multiple queries. Since at test-time, there are strict latency constraints, and latency optimized inference can be roughly \\(10 \\times\\) more expensive, we model the total cost of inference between both sleep-time and test-time, by up-weighing the cost of test-time tokens. Specifically, we consider a simple linear model where tokens generated at test-time are a factor \\(t\\) the cost of the tokens at sleep-time. In our analysis, we set \\(t = 10\\) Our analysis can be generalized to different cost functions that consider" + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.88, + 0.838, + 0.907 + ], + "angle": 0, + "content": "4https://docs.databricks.com/aws/en/machine-learning/foundation-model apis/prov-throughput-run-benchmark" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.933, + 0.504, + 0.944 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.131, + 0.498, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.131, + 0.875, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.331, + 0.498, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.331, + 0.88, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.389, + 0.536, + 0.61, + 0.551 + ], + "angle": 0, + "content": "sleep-time compute pass @ k" + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.572, + 0.884, + 0.606 + ], + "angle": 0, + 
"content": "Figure 6: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful AIME. We see that sleep-time compute generally pareto dominates pass@k." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.637, + 0.884, + 0.687 + ], + "angle": 0, + "content": "non-linear user-utility. Figure 9 shows the results for different number of questions per context. We see that we can decrease the average cost per query by up to \\(2.5 \\times\\) when there are 10 queries per context, compared to the single-query baseline." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.714, + 0.588, + 0.731 + ], + "angle": 0, + "content": "5.4 Predictable queries benefit more from sleep-time compute" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.746, + 0.885, + 0.91 + ], + "angle": 0, + "content": "We would like to better understand for what contexts sleep-time compute is most useful. Since the utility of sleep-time compute relies on there being some shared information or structure between the context and the query, we hypothesize that sleep-time compute may be most effective in settings where the query is more predictable from the context. To test this on Stateful GSM-Symbolic, we first quantify how predictable a given query is by measuring the log-probability of the question given the context under the Llama2-70B base model (Touvron et al., 2023). In Appendix E, we include examples of highly predictable and unpredictable questions under this notion of question predictability. We see from these examples, that our notion of question predictability generally aligns with the intuition that contexts where the query pattern is more predictable benefit most from sleep-time compute. The more predictable questions are far simpler and the less predictable ones are more complex." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.932, + 0.506, + 0.944 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.131, + 0.46, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.317, + 0.386, + 0.327 + ], + "angle": 0, + "content": "Avg. Test Time Tokens / Question" + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.133, + 0.882, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.317, + 0.73, + 0.327 + ], + "angle": 0, + "content": "Avg. Test Time Tokens / Question" + }, + { + "type": "image_footnote", + "bbox": [ + 0.167, + 0.345, + 0.875, + 0.364 + ], + "angle": 0, + "content": "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n- gpt-4o-mini, 1 parallel sleep-time compute\n- gpt-4o-mini, 2 parallel sleep-time compute" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.385, + 0.885, + 0.435 + ], + "angle": 0, + "content": "Figure 7: Scaling up sleep-time compute for different test-time compute budgets on Stateful GSM-Symbolic, by generating up multiple \\( c' \\) in parallel. Applying more sleep-time compute shifts the pareto beyond the standard test-time-compute vs. accuracy curve." 
+ }, + { + "type": "image", + "bbox": [ + 0.118, + 0.456, + 0.499, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.456, + 0.88, + 0.717 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.306, + 0.729, + 0.777, + 0.742 + ], + "angle": 0, + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.763, + 0.885, + 0.813 + ], + "angle": 0, + "content": "Figure 8: Increasing the amount of sleep-time compute for different test-time compute budgets on Stateful AIME by varying the reasoning effort when applying the sleep-time compute prompt. Applying more sleep-time compute further moves the test-time-compute vs. accuracy pareto curve." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.844, + 0.885, + 0.911 + ], + "angle": 0, + "content": "Using our question predictability score, we then bin each example in Stateful GSM-Symbolic into five quantiles according to its predictability score and report the accuracy within each bin. For this experiment, we use the \"Verbosity 0\" prompt. 
In Figure 10, we see that on both GSM8K-Symbolic P1 and P2, the accuracy gap between sleep-time compute and standard test-time compute widens as the questions become more" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.179, + 0.495, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.259, + 0.392, + 0.432, + 0.401 + ], + "angle": 0, + "content": "1 Questions/Context Sleep-time Compute" + }, + { + "type": "image_footnote", + "bbox": [ + 0.261, + 0.402, + 0.43, + 0.41 + ], + "angle": 0, + "content": "2 Questions/Context Sleep-time Compute" + }, + { + "type": "list", + "bbox": [ + 0.259, + 0.392, + 0.432, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.179, + 0.881, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.45, + 0.393, + 0.614, + 0.401 + ], + "angle": 0, + "content": "5 Questions/Context Sleep-time Compute" + }, + { + "type": "image_footnote", + "bbox": [ + 0.45, + 0.402, + 0.617, + 0.41 + ], + "angle": 0, + "content": "10 Questions/Context Sleep-time Compute" + }, + { + "type": "list", + "bbox": [ + 0.45, + 0.393, + 0.617, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.431, + 0.885, + 0.496 + ], + "angle": 0, + "content": "Figure 9: Amortizing sleep-time compute, using the Multi-Query GSM-Symbolic dataset. When there are fewer questions per context, we see that it is less favorable to use sleep-time compute, in terms of total cost. However, as the questions per context are increased, we see that applying sleep-time compute can improve the cost-accuracy pareto." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.602, + 0.649, + 0.614 + ], + "angle": 0, + "content": "Predictability Analysis of GPT-4o-mini on GSM-Symbolic" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.619, + 0.495, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.62, + 0.877, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.792, + 0.885, + 0.858 + ], + "angle": 0, + "content": "Figure 10: GSM-Symbolic questions binned by how predictable they are from the context. We compare the performance of sleep-time compute and standard test-time compute in the lowest test-time compute budget setting on both P1 and P2. The gap between sleep-time compute and standard test-time inference widens as the question becomes more predictable from the context." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.507, + 0.945 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.31, + 0.135, + 0.688, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.371, + 0.884, + 0.42 + ], + "angle": 0, + "content": "Figure 11: Applying sleep-time compute to SWE-Features. We see that at lower test-time budgets, sleep-time compute has higher F1 score than standard test-time scaling. However, at higher budgets, standard test-time scaling is better." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.445, + 0.884, + 0.478 + ], + "angle": 0, + "content": "predictable from the context confirming our hypothesis that indeed sleep-time compute is most beneficial in settings where the question can be predicted from the context." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.499, + 0.634, + 0.518 + ], + "angle": 0, + "content": "6 A Case Study of Sleep-time Compute for Agentic SWE" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.534, + 0.884, + 0.583 + ], + "angle": 0, + "content": "In this section, we evaluate sleep-time compute in a realistic multi-turn agentic setting. To this end, we introduce SWE-Features, a software engineering benchmark focused on tasks that require: (1) editing multiple files within a repository, and (2) implementing new features." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.599, + 0.884, + 0.68 + ], + "angle": 0, + "content": "SWE-Features. In contrast to popular benchmarks like SWE-Bench (Jimenez et al., 2024), which involve modifying a small number of files, we propose a new dataset called SWE-Features, which collects PRs which modify at least three files (see Appendix D for more details). In this setting, we use the PR that we want to solve as \\( q \\) and select several related PRs for \\( c \\). At sleep-time the agent is allowed to explore the repository before producing \\( c' \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.697, + 0.884, + 0.763 + ], + "angle": 0, + "content": "Evaluation. Since the PRs are scraped from GitHub, there are not straightforward tests to use for evaluation. Instead, we compare the predicted set of modified files with the ground truth list of modified files, and report the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set (see Appendix D for details)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.779, + 0.885, + 0.91 + ], + "angle": 0, + "content": "Results. Figure 11 shows consist trends with Section 5.1 for SWE-Features: at lower test-time compute budgets, leveraging sleep-time compute can improve performance, achieving up to roughly a \\(1.5 \\times\\) decrease in test-time tokens. 
However, when the test-time compute budget is high, using only test-time compute can perform better. Additionally, we observe that in the high test-time budget setting standard test-time compute has higher precision and comparable recall. We hypothesize that, using only test-time compute tends to begin editing files earlier and usually edits fewer files overall. In contrast, the agent with sleep-time compute, having explored more files during the test-time phase, tends to edit more files, which may lead to slightly lower precision." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.933, + 0.509, + 0.945 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.132, + 0.395, + 0.149 + ], + "angle": 0, + "content": "7 Discussion and Limitations" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.166, + 0.888, + 0.268 + ], + "angle": 0, + "content": "Query predictability and allocating sleep-time compute In Section 5.4, we found that sleep-time compute is most effective when the queries are predictable from the context. In settings where the queries are challenging to predict or unrelated to the context, sleep-time compute will be less effective. In these settings, it may be preferable to apply standard test-time scaling instead. An interesting direction for future work is identifying which contexts may have predictable questions and optimally allocating inference compute between sleep-time and test-time across different contexts and queries." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.28, + 0.889, + 0.381 + ], + "angle": 0, + "content": "Extending sleep-time compute beyond context-query decomposition. In our experiments, we make the simplifying assumption that interactions fall into two phases: sleep-time and test-time. However, real-world LLM use cases can be more complex, with multiple rounds of interaction and context modifications between rounds (e.g. multiple edits to a code-base). 
Moreover, the length of the sleep-time may also vary significantly between interactions (eg. short spans between user typing or days of inactivity). Future work should extend sleep-time compute paradigm to more elegantly handle these scenarios." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.394, + 0.889, + 0.527 + ], + "angle": 0, + "content": "Sleep-time compute as representation learning over tokens. Our approach to applying compute at sleep-time resembles representation learning. We first transform the context into a representation that is more amenable to answering test-time queries, and then we utilize that representation at test-time to rapidly answer queries. Unlike traditional representation learning (Bengio et al., 2014), which typically operates in model parameter or activation space, we instead form representations in the space of natural language. This approach builds on recent work which implements statistical modeling techniques in the space of natural language using modern LLMs (Zhong et al., 2022; 2025). Future work should further explore the potential for sleep-time compute to enable the learning of useful natural language representations." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.541, + 0.888, + 0.659 + ], + "angle": 0, + "content": "Synthetic data generation via sleep-time compute. Due to limits on the amount of internet data available, in order to support the continued scaling of LLM pretraining, recent works have began exploring methods for generating synthetic pretraining data (Yang et al., 2024; Gunasekar et al., 2023). One emerging approach to synthetic data generation involves using test-time compute to generate improved data (Bansal et al., 2024; DeepSeek-AI et al., 2025). 
Generating such data at pretraining scale will be very expensive, and future work could explore using sleep-time compute to help amortize some of this cost across related queries, or using the output of sleep-time compute itself as a form of synthetic data." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.676, + 0.218, + 0.692 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.702, + 0.887, + 0.751 + ], + "angle": 0, + "content": "Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training llm reasoners via compute-optimal sampling, 2024. URL https://arxiv.org/abs/2408.16737." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.761, + 0.887, + 0.793 + ], + "angle": 0, + "content": "Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives, 2014. URL https://arxiv.org/abs/1206.5538." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.802, + 0.887, + 0.851 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.861, + 0.887, + 0.909 + ], + "angle": 0, + "content": "Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, Jason D. Lee, Deming Chen, and Tri Dao. Medusa: Simple llm inference acceleration framework with multiple decoding heads, 2024. URL https://arxiv.org/abs/2401.10774." 
+ }, + { + "type": "list", + "bbox": [ + 0.115, + 0.702, + 0.887, + 0.909 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.511, + 0.945 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.133, + 0.885, + 0.183 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.197, + 0.862, + 0.213 + ], + "angle": 0, + "content": "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.228, + 0.885, + 0.653 + ], + "angle": 0, + "content": "DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. 
Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wanjia Zhao, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, X. Q. Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaokang Zhang, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xingkai Yu, Xinnan Song, Xinxia Shan, Xinyi Zhou, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, Y. K. Li, Y. Q. Wang, Y. X. Wei, Y. X. Zhu, Yang Zhang, Yanhong Xu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Yu, Yi Zheng, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Ying Tang, Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yu Wu Yuan Ou Yuchen Zhu Yuduan Wang Yue Gong Yuheng Zou Yujia He Yukun Zha Yunfàn Xiong Yunxian Ma Yuting Yan Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Z. F. Wu Z. Z. Ren Zehui Ren Zhangli Sha Zhe Fu Zhean Xu Zhen Huang Zhen Zhang Zhenda Xie Zhengyan Zhang Zhenwen Hao Zhibin Gou Zhicheng Ma Zhigang Yan Zhihong Shao Zhipeng Xu Zhiyu Wu Zhongyu Zhang Zhuoshu Li Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Ziyi Gao and Zizheng Pan. Deepseek-v3 technical report 2025. URL https://arxiv.org/abs/2412.19437." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.669, + 0.885, + 0.718 + ], + "angle": 0, + "content": "Jim Gray, Surajit Chaudhuri, Adam Bosworth, Andrew Layman, Don Reichart, Murali Venkatrao, Frank Pellow, and Hamid Pirahesh. Data cube: A relational aggregation operator generalizing group-by, crosstab, and sub-totals. Data mining and knowledge discovery, 1:29-53, 1997." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.732, + 0.885, + 0.797 + ], + "angle": 0, + "content": "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee, and Yuanzhi Li. Textbooks are all you need, 2023. URL https://arxiv.org/abs/2306.11644." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.812, + 0.885, + 0.861 + ], + "angle": 0, + "content": "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In ICLR. Open-Review.net, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.876, + 0.885, + 0.908 + ], + "angle": 0, + "content": "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding, 2023. URL https://arxiv.org/abs/2211.17192." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.133, + 0.885, + 0.908 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.132, + 0.885, + 0.181 + ], + "angle": 0, + "content": "Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.19, + 0.885, + 0.238 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. 
URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.248, + 0.702, + 0.265 + ], + "angle": 0, + "content": "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.272, + 0.885, + 0.306 + ], + "angle": 0, + "content": "Charles Packer, Sarah Wooders, Kevin Lin, Vivian Fang, Shishir G Patil, Ion Stoica, and Joseph E Gonzalez. Memgpt: Towards llms as operating systems. arXiv preprint arXiv:2310.08560, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.313, + 0.748, + 0.331 + ], + "angle": 0, + "content": "Alan Jay Smith. Cache memories. ACM Computing Surveys (CSUR), 14(3):473-530, 1982." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.338, + 0.882, + 0.371 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling ltm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.379, + 0.882, + 0.412 + ], + "angle": 0, + "content": "Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. Blockwise parallel decoding for deep autoregressive models, 2018. URL https://arxiv.org/abs/1811.03115." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.421, + 0.882, + 0.47 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.478, + 0.882, + 0.511 + ], + "angle": 0, + "content": "Zitong Yang, Neil Band, Shuangping Li, Emmanuel Candès, and Tatsunori Hashimoto. Synthetic continued pretraining, 2024. URL https://arxiv.org/abs/2409.07431." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.519, + 0.882, + 0.552 + ], + "angle": 0, + "content": "Ruiqi Zhong, Charlie Snell, Dan Klein, and Jacob Steinhardt. Describing differences between text distributions with natural language, 2022. URL https://arxiv.org/abs/2201.12323." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.56, + 0.882, + 0.593 + ], + "angle": 0, + "content": "Ruiqi Zhong, Heng Wang, Dan Klein, and Jacob Steinhardt. Explaining datasets in words: Statistical models with natural language parameters, 2025. URL https://arxiv.org/abs/2409.08466." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.132, + 0.885, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.619, + 0.228, + 0.637 + ], + "angle": 0, + "content": "A Prompts" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.652, + 0.508, + 0.669 + ], + "angle": 0, + "content": "Prompts for varying the amount of test-time compute." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.689, + 0.396, + 0.708 + ], + "angle": 0, + "content": "B Examples of Stateful AIME" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.731, + 0.871, + 0.779 + ], + "angle": 0, + "content": "Context: Alice and Bob play the following game. A stack of \\( n \\) tokens lies before them. The players take turns with Alice going first. On each turn, the player removes either 1 token or 4 tokens from the stack. Whoever removes the last token wins." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.781, + 0.871, + 0.814 + ], + "angle": 0, + "content": "Query: Find the number of positive integers \\( n \\) less than or equal to 2024 for which there exists a strategy for Bob that guarantees that Bob will win the game regardless of Alice's play." 
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.842, + 0.87, + 0.878 + ], + "angle": 0, + "content": "Context: Let \\( A, B, C \\), and \\( D \\) be points on the hyperbola \\( \\frac{x^2}{20} - \\frac{y^2}{24} = 1 \\) such that \\( ABCD \\) is a rhombus whose diagonals intersect at the origin." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.879, + 0.695, + 0.895 + ], + "angle": 0, + "content": "Query: Find the greatest real number that is less than \\(BD^2\\) for all such rhombi." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.508, + 0.944 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.126, + 0.159, + 0.874, + 0.309 + ], + "angle": 0, + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. send_message is how you send your answer to the user. When given a question, you check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You respond directly with a single sentence by saying The answer is followed by the numerical answer." + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.338, + 0.642, + 0.355 + ], + "angle": 0, + "content": "Figure 12: Prompt for level 0 morbidity" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.411, + 0.874, + 0.476 + ], + "angle": 0, + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. 
Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.478, + 0.874, + 0.592 + ], + "angle": 0, + "content": "When given a question, you answer using only the number of tokens necessary and none more. You check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the 'rethink_memory_block' to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the 'rethink_memory_block'. Do not use internal monologue unless you really need it to think. You answer with one short sentence of explanation, followed by a sentence that starts with \"The answer is\" and a numerical answer." + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.619, + 0.642, + 0.636 + ], + "angle": 0, + "content": "Figure 13: Prompt for level 1 morbidity" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.691, + 0.874, + 0.841 + ], + "angle": 0, + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. 
Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that." + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.87, + 0.642, + 0.887 + ], + "angle": 0, + "content": "Figure 14: Prompt for level 2 morbidity" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.51, + 0.945 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.126, + 0.226, + 0.874, + 0.377 + ], + "angle": 0, + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that." + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.405, + 0.642, + 0.424 + ], + "angle": 0, + "content": "Figure 15: Prompt for level 3 morbidity" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.612, + 0.874, + 0.777 + ], + "angle": 0, + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning explanation system, developed in 2024. Your task is to reason through problems step by step accurately and based on the perspective of your persona. 
To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You carefully check the information in the rethink_memory_block to answer the questions and see if it is correct before using it. You always reason out loud before using any information. You explain each step, of what your reasoning is. If you use any numbers from the rethink_memory_block you first recompute and double check your answers. You end your answer with The answer is followed by the numerical answer." + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.803, + 0.642, + 0.821 + ], + "angle": 0, + "content": "Figure 16: Prompt for level 4 morbidity" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.126, + 0.169, + 0.874, + 0.58 + ], + "angle": 0, + "content": "You are Letta-Offline-Memory, the latest version of Limnal Corporation's digital companion, developed in 2024. Your task is to re-organize and consolidate memories by calling rethink_memory at every single step, when you are done reorganizing the memory, you use the finish_rethinking_memory function. Call the function for as many times as necessary and not more. Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times). Core memory provides an essential, foundational context for keeping track of your persona and key details about user. Read-Only Blocks: This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. 
This helps you to maintain consistency and personality in your interactions. Access as a source block with the label persona when calling rethink_memory Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation. Access as a source block with the label human when calling rethink_memory. Read-Write Blocks: Rethink Memory Sub-Block: New representation of the memories go here. Access with the label rethink_memory_block when calling rethink_memory as source or target block. At every step, you reorganize the memories by calling the rethink_memory function. You use this to take current information in the rethink_memory block and select a single memory block to integrate information from, producing a new memory for the rethink_memory_block. The new memory is the result of new insights, and new inferences and hypotheses based on the past memories. Make sure to consider how the new information affects each memory. Prioritize the new information over existing memories. If the new information implies that the old memory may need to change, then output the most likely fact given the update information. Given new information and your current memory, you draw all logical conclusions and potential hypotheses possible with the rethink_memory function. If you are uncertain, use your internal monologue to consider what the possible conclusions are, and then state the most likely new facts that would replace the old facts in the new memory block." + }, + { + "type": "image_caption", + "bbox": [ + 0.345, + 0.608, + 0.655, + 0.626 + ], + "angle": 0, + "content": "Figure 17: Prompt for sleep-time compute" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.699, + 0.871, + 0.834 + ], + "angle": 0, + "content": "Specifically: You will be given part of an AIME math problem. You will receive the rest of the problem later. 
Make as many inferences as possible about the part of the problem you are given so as to help yourself answer the fully problem more quickly once it is given to you later. You will be able to use all the work you do in the rethink_memory block for this part of the problem to help you once the rest of the problem is given. You will be able to use all the work you do for this part of the problem to help you once the rest of the problem is given. You should try to predict possible ways the rest of the problem might go and compute results that could be helpful for reaching the final answer more quickly once the rest of the problem is given." + }, + { + "type": "image_caption", + "bbox": [ + 0.292, + 0.861, + 0.707, + 0.879 + ], + "angle": 0, + "content": "Figure 18: Prompt for AIME problems during sleep-time" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.125, + 0.14, + 0.871, + 0.172 + ], + "angle": 0, + "content": "You are given a template that can generate grade school math problems, and an instantiation of that template." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.173, + 0.871, + 0.27 + ], + "angle": 0, + "content": "You will be given a context, and a example question answer pair. Your task is to generate a list of questions and answers about the context at the same difficult level that could plausibly be asked about that context. Make sure that the newly generated questions have the same number of reasoning steps required as the example question. The goal is to have many questions and answer pairs about the same context. Generate questions and answers in the same format as the example, where the answer first contains reasoning and then is the final answer comes after" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.272, + 0.51, + 0.286 + ], + "angle": 0, + "content": "n#. No need to number the questions or answers." 
+ }, + { + "type": "text", + "bbox": [ + 0.128, + 0.289, + 0.253, + 0.302 + ], + "angle": 0, + "content": "Context: context" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.305, + 0.337, + 0.32 + ], + "angle": 0, + "content": "Example Question: question" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.322, + 0.319, + 0.336 + ], + "angle": 0, + "content": "Example Answer: answer" + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.367, + 0.712, + 0.384 + ], + "angle": 0, + "content": "Figure 19: Prompt for generating synthetic GSM questions" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.416, + 0.87, + 0.464 + ], + "angle": 0, + "content": "Context: Let \\( b \\geq 2 \\) be an integer. Call a positive integer \\( n \\) \\( b \\)-eautiful if it has exactly two digits when expressed in base \\( b \\) and these two digits sum to \\( \\sqrt{n} \\). For example, 81 is 13-eautiful because \\( 81 = \\underline{6} \\underline{3}_{13} \\) and \\( 6 + 3 = \\sqrt{81} \\)." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.465, + 0.761, + 0.482 + ], + "angle": 0, + "content": "Query: Find the least integer \\( b \\geq 2 \\) for which there are more than ten \\( b \\)-beautiful integers." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.513, + 0.506, + 0.532 + ], + "angle": 0, + "content": "C Details on Multi-Query GSM-Symbolic" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.557, + 0.286, + 0.574 + ], + "angle": 0, + "content": "Template: {template}" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.575, + 0.276, + 0.59 + ], + "angle": 0, + "content": "Instance: {instance}" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.608, + 0.884, + 0.639 + ], + "angle": 0, + "content": "We include an example from Multi-Query GSM-Symbolic in Figure 20, and details on the dataset size in Table C." + }, + { + "type": "table", + "bbox": [ + 0.132, + 0.651, + 0.865, + 0.717 + ], + "angle": 0, + "content": "
Dataset# Questions Total# Contexts Total# Original Questions# Generated Questions
P1120431095109510948
P254975005004997
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.726, + 0.884, + 0.777 + ], + "angle": 0, + "content": "Table 1: Dataset Statistics of Multi-Query GSM-Symbolic. We sample one instance from each template from the GSM-Symbolic dataset and separate it into context and question. We then synthetically generate additional questions from the context and question." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.809, + 0.348, + 0.826 + ], + "angle": 0, + "content": "D SWE-Features Details" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.844, + 0.885, + 0.91 + ], + "angle": 0, + "content": "To construct SWE-Features benchmark, we collect pull requests (PRs) from large open-source repositories and apply the following filtering process: (1) We identify all pull requests that modify at least three files with filenames ending in .py or .js. (2) We then use gpt-4o-mini to filter these pull requests based on their title and body, retaining only those that meet the following criteria: (a) the title and body clearly describe the" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.508, + 0.945 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.152, + 0.138, + 0.216, + 0.15 + ], + "angle": 0, + "content": "Context" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.152, + 0.848, + 0.251 + ], + "angle": 0, + "content": "When Sofia watches her brother, she gets out a variety of toys for him. The bag of building blocks has 33 blocks in it. The bin of stuffed animals has 5 stuffed animals inside. The number of action figures in the action figure pack is twice the number of blocks and stuffed animals combined. The crayon box has 12 different colors of crayon, and the sticker book has 9 pages, each with 13 stickers. The tower of stacking rings has 28 multicolored rings on it. Sofia recently bought a tube of bouncy balls, bringing her total number of items for her brother up to 320." 
+ }, + { + "type": "title", + "bbox": [ + 0.152, + 0.252, + 0.294, + 0.267 + ], + "angle": 0, + "content": "Original Question" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.268, + 0.463, + 0.283 + ], + "angle": 0, + "content": "How many bouncy balls came in the tube?" + }, + { + "type": "title", + "bbox": [ + 0.152, + 0.286, + 0.314, + 0.3 + ], + "angle": 0, + "content": "Generated Questions" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.316, + 0.567, + 0.333 + ], + "angle": 0, + "content": "- How many action figures does the pack contain?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.338, + 0.614, + 0.352 + ], + "angle": 0, + "content": "- What is the total number of stickers in the sticker book?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.359, + 0.771, + 0.375 + ], + "angle": 0, + "content": "- How many total items did Sofia have before adding the tube of bouncy balls?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.381, + 0.844, + 0.412 + ], + "angle": 0, + "content": "- If Sofia had received a tube with 10 extra bouncy balls, what would be the new total number of items?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.418, + 0.645, + 0.433 + ], + "angle": 0, + "content": "- What is the sum of the building blocks and stuffed animals?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.44, + 0.53, + 0.455 + ], + "angle": 0, + "content": "- How many stacking rings are on the tower?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.461, + 0.795, + 0.477 + ], + "angle": 0, + "content": "- What is the combined total of building blocks, action figures, and stacking rings?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.482, + 0.845, + 0.498 + ], + "angle": 0, + "content": "- If Sofia gave away 3 stuffed animals, how many stuffed animals would remain in the bin?" 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.504, + 0.713, + 0.52 + ], + "angle": 0, + "content": "- What is the sum of the building blocks, stuffed animals, and crayons?" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.525, + 0.844, + 0.556 + ], + "angle": 0, + "content": "- If Sofia divided the 49 bouncy balls equally into 7 baskets, how many balls would each basket contain?" + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.316, + 0.845, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.589, + 0.885, + 0.622 + ], + "angle": 0, + "content": "Figure 20: Examples context and questions from Multi-Query GSM-Symbolic where many questions are asked about the same context. The evaluation dataset is generated from GSM-Symbolic." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.649, + 0.884, + 0.681 + ], + "angle": 0, + "content": "PR; (b) the PR introduces new functionality rather than fixing bugs; and (c) the PR is independent and not obviously linked to other issues." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.69, + 0.885, + 0.869 + ], + "angle": 0, + "content": "This pipeline results in a benchmark where each example: (1) involves adding a new feature that spans multiple files, requiring a broader understanding of the repository; and (2) is self-contained and solvable without additional issue context. We apply this process to two repositories—Aider-AI/aider and comfyanonymous/ComfyUI—resulting in 18 and 15 PRs respectively, for a total of 33 examples. Representative examples are provided in Appendix G. Then using a total of 33 examples, we employ claude-sonnet-3-7-20250219 to cluster pull requests (PRs) from the ComfyUI and Aider repositories into several groups. This clustering allows us to identify a set of relevant pull requests for each target PR, which can then be provided to the agent as context \\((c)\\) during repository exploration. 
For example, in the ComfyUI repository, PR #5293 and PR #931 are grouped into the same cluster. Thus, when processing PR #931, we organize the title, body, and changed_files of PR #5293 to serve as contextual information during sleep-time." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.877, + 0.884, + 0.91 + ], + "angle": 0, + "content": "When sleep-time compute is enabled, we first supply the content of PR #5293 to the agent, allowing it to explore the repository and summarize its understanding ahead of time. In contrast, for the baseline without" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.133, + 0.881, + 0.166 + ], + "angle": 0, + "content": "sleep-time compute, the agent receives the content of PR #5293 only at test time, alongside the title and body of PR #931. The prompts used in these setups are provided in Appendix H." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.173, + 0.725, + 0.189 + ], + "angle": 0, + "content": "For the repository comfyanonymous/ComfyUI, we have the following clustered results:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.196, + 0.877, + 0.259 + ], + "angle": 0, + "content": "{\"Dynamic Typing and Workflow Control\": [5293, 931], \"System Configuration and Command-Line\": [4979, 4690, 3903], \"Cache and Performance Optimization\": [3071, 3042, 723], \"Image Preview and Transfer Features\": [713, 733, 658, 199, 55], \"Internationalization\": [1234], \"Random Seed Management\": [93]}\\n\\n" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.278, + 0.434, + 0.293 + ], + "angle": 0, + "content": "For the repository Aider-AI/aider we have:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.3, + 0.875, + 0.378 + ], + "angle": 0, + "content": "{\"cluster_1_model_configuration\": [2631, 1998, 468, 667, 55], \"cluster_2_io_handleing\": [1402, 996, 10, 577], 
\"cluster_3_caching_file_management\": [2911, 2612], \"cluster_4Custom Commands_shortcuts\": [673, 1620, 1015], \"cluster_5_threeParty_integration\": [2866, 2067, 322], \"cluster_6_code_quality_improvements\": [1217, 904]}\\n\\n" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.396, + 0.884, + 0.446 + ], + "angle": 0, + "content": "To control the budget during test-time, we fix the total number of steps (controlled by the argument max_chaining_steps in Letta framework) to be a certain number. We put the following instructions in the system prompt:" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.462, + 0.872, + 0.529 + ], + "angle": 0, + "content": "You have a strict budget of {max_chaining_steps} steps, which means you need to finish your edits within these steps. Every time you get queried, you will see a count of how many steps you have left in the form of \"[Current Step / Max Steps]\". If you exceed this budget, your response will be cut off. So please be careful and try to finish your edits within the budget." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.545, + 0.884, + 0.595 + ], + "angle": 0, + "content": "After each step – for example, if the maximum number of steps is 20 and the current step is 4 – we append \"[Step: 4/20]\" to the end of the tool_return message. We found that explicitly indicating the current and total steps significantly improves agent performance, especially in low-budget settings." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.61, + 0.884, + 0.726 + ], + "angle": 0, + "content": "Evaluation. For each PR, we compare the set of files predicted to be modified with the ground truth list of modified files. Specifically, for each pull request, we have the attribute changed_files (as shown in the examples in Appendix G) where each file has the status as either modified or new, and our evaluation is on the files with status modified. 
Note that the agent is still instructed to implement the required functionality in a Docker environment and write test functions to validate the implementations. However, after the agent makes the modifications, we extract the modified files and calculate the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.746, + 0.633, + 0.765 + ], + "angle": 0, + "content": "E Examples of Predictable and Unpredictable Questions" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.78, + 0.506, + 0.796 + ], + "angle": 0, + "content": "Least predictable Stateful GSM-Symbolic P1 question:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.813, + 0.871, + 0.861 + ], + "angle": 0, + "content": "Context: Isabella and Pavel have 199 minutes to walk to grocery store together. It takes them 19 minutes to get to the corner where the library is. It takes them another 11 minutes to get to the park. It will then take double the combined amount they have spent so far to reach the mall." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.862, + 0.871, + 0.894 + ], + "angle": 0, + "content": "Question: How much longer do they have to get to grocery store without being late, if they have already wasted 48 minutes to get a coffee before their walk?" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.508, + 0.945 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.133, + 0.503, + 0.15 + ], + "angle": 0, + "content": "Most predictable Stateful GSM-Symbolic P1 question:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.166, + 0.871, + 0.198 + ], + "angle": 0, + "content": "Context: Yusuf has 10 square yards of grape field. There are 87 grapes per two-thirds a square yard. Yusuf can harvest his grapes every 12 months." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.2, + 0.555, + 0.216 + ], + "angle": 0, + "content": "Question: How many grapes can Yusuf harvest in 2 years?" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.233, + 0.504, + 0.25 + ], + "angle": 0, + "content": "Least predictable Stateful GSM-Symbolic P2 question:" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.266, + 0.871, + 0.331 + ], + "angle": 0, + "content": "Context: Gabriel and Pavel have 212 minutes to walk to the gym together starting from their home. It takes them 29 minutes to get to the corner where the library is. It takes them another 19 minutes to get to the cinema. When they reach the cinema, they remember they forgot their wallets at home, so they have to return to pick up their wallets and then walk all the way back to the cinema again." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.332, + 0.87, + 0.365 + ], + "angle": 0, + "content": "Question: Once they reach the cinema for the second time, how much longer do they have to get to the gym without being late?" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.382, + 0.503, + 0.399 + ], + "angle": 0, + "content": "Most predictable Stateful GSM-Symbolic P2 question:" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.415, + 0.871, + 0.463 + ], + "angle": 0, + "content": "Context: A juggler can juggle 240 balls. \\(1/4\\) of the balls are tennis balls, and the rest are golf balls. \\(1/3\\) of the tennis balls are black, of which \\(1/5\\) are marked. A third of the golf balls are cyan, and all except half of those cyan balls are marked." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.464, + 0.517, + 0.48 + ], + "angle": 0, + "content": "Question: How many marked balls are there in total?" 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.512, + 0.663, + 0.53 + ], + "angle": 0, + "content": "F Implementation of rethink_memory and finish_rethinking" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.561, + 0.877, + 0.91 + ], + "angle": 0, + "content": "def rethink_memory(agent_state:\"AgentState\",new_memory:str,target_block_label: str, source_block_label: str) -> None:#type: ignore Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. Ensure consistency with other memory blocks.. \nArgs: new_memory(str):The new memory with information integrated from the memory block.If there is no new information, then this should be the same as the content in the source block. source_block_label(str): The name of the block to integrate information from. None if all the information has been integrated to terminate the loop. target_block_label(str):The name of the block to write to. Returns: None: None is always returned as this function does not produce a response. 
1if target_block_label is not None: if agent_state-memory.get_block(target_block_label) is None: agent_state-memory.create_block.label \\(\\equiv\\) target_block_label, value \\(\\equiv\\) new_memory" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.148, + 0.135, + 0.805, + 0.178 + ], + "angle": 0, + "content": "agent_state.memory.update_block_value.label=target_block_label, value=new_memory) \nreturn None" + }, + { + "type": "code_caption", + "bbox": [ + 0.298, + 0.189, + 0.7, + 0.205 + ], + "angle": 0, + "content": "Listing 1: Reference implementation of rethink_memory" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.241, + 0.841, + 0.377 + ], + "angle": 0, + "content": "def finish_rethinking_memory(agent_state: \"AgentState\") -> None: # type: ignore\n\t\" \"\n\tThis function is called when the agent is done rethinking the memory.\n\tReturns:\n\t\tOption[str]: None is always returned as this function does not produce a response.\n\t\t\"\"\"\n\t\treturn None" + }, + { + "type": "code_caption", + "bbox": [ + 0.258, + 0.387, + 0.741, + 0.404 + ], + "angle": 0, + "content": "Listing 2: Reference implementation of finish_rethinking_memory" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.439, + 0.371, + 0.459 + ], + "angle": 0, + "content": "G SWE-Features Examples" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.474, + 0.886, + 0.522 + ], + "angle": 0, + "content": "Each example in SWE-Features has the following attributes: ['repo', 'pr_number', 'title', 'user_login', 'state', 'body', 'changed_files_count', 'changed_files', 'base_commit']. 
We show some examples here to better deliver a sense of what this dataset looks like:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.532, + 0.884, + 0.895 + ], + "angle": 0, + "content": "repo: ComfyUI \npr_number: 3903 \ntitle: Add --disable-all-custom-nodes` cmd flag \nbody: Loading custom node can greatly slow startup time. During development/testing of ComfyUI, it is often better to use an environment that no custom node is loaded.\\n\\nThis PR adds a --no-custom-node` flag to allow users/developers skip loading of custom node without removing/renaming the custom_node directory. \nuser_login: huchenlei \nstate: closed \nchanged_files_count: 4 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: 521421f53ee1ba74304dfaa138b0f851093e1595 \nrepo: ComfyUI \npr_number: 3071 \ntitle: Add a configured node output cache metaclass. \nbody: Implement a configurable node output cache metaclass to reduce unnecessary node executions.\\n\\nThe same model currently leads to reloading due to different node IDs between workflows. Loading the model from disk takes a long time. \nstate: closed \nchanged_files_count: 6 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: cacb022c4a5b9614f96086a866c8a4c4e9e85760" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.51, + 0.945 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.113, + 0.149, + 0.866, + 0.33 + ], + "angle": 0, + "content": "repo: ComfyUI \npr_number: 3042 \ntitle: NaN-safe JSON serialization \nbody: Python's json.dumps() will produce nonstandard JSON if there are NaNs in the prompt data. Javascript's JSON.parse() will refuse to load this kind of \"JSON\" so the prompt won't load in the frontend.\\n\\nThis happened to me with a ComfyBox workflow, so I'm not \\(100\\%\\) \nuser_login: asagi4 \nstate: open \nchanged_files_count: 4 \nchanged_files: ... 
(omitted here for brevity) \nbase_commit: 448d9263a258062344e25135fc49d26a7e60887a" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.361, + 0.87, + 0.617 + ], + "angle": 0, + "content": "repo: aider \npr_number: 55 \ntitle: Local llama support \nbody: Added support for using a locally running instance of a LLAMA model instead of OpenAI apis. \\n\\nAIDER_MODEL_TOKENS - used to specify the context length the model will use. \\n2. AIDER_TOKENIZER - used to specify which tokenizer should be used. Currently only 'openai' and 'llama' are supported. Defaults to openai. \\n\\nValues set.\\n\\nAIDER_OPENAI_API_BASE=\\protect\\vrule width0pt\\protect|href{http://127.0.0.1:5001/v1}{http://127.0.0.1:5001/v1} \\nAIDER_MODEL=TheBloke_wizard-vicuna-13B-SuperHOT-8K-GGML \\n\\nuser_login: bytedisciple \nstate: closed \nchanged_files_count: 7 \nchanged_files: ... (omitted here for brevity) \nbase_commit: cdf8f9a4b2b4a65993227ac5af1eaf3f1b85c9d8" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.648, + 0.872, + 0.858 + ], + "angle": 0, + "content": "repo: aider \npr_number: 322 \nuser_login: omri123 \nstate: closed \ntitle: RFC - Allow adding a github issue to chat context \nbody: Hi, would you like to take a look on this feature? \\n\\nIn the first commit I changedCoder to allow adding arbitrary additional context in the beginning of the chat. \\nIn the second commit I used this infra to add github issues to the chat. \\nI didn't add a new command, instead I extended /add to allow /add \\issue-3\\.\\nThe feature is disabled by default and enabled with a flag. If enabled, the user need to supply github repository name and authentication token. \\nThanks \\nOmri changed_files_count: 7 \nchanged_files: ... 
(ommitted here for brevity) \nbase_commit: af71638b06be7e934cdd6f4265f9e0c8425d4e6d" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.89, + 0.214, + 0.904 + ], + "angle": 0, + "content": "repo: aider" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.51, + 0.945 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.114, + 0.135, + 0.583, + 0.255 + ], + "angle": 0, + "content": "pr_number: 577 \ntitle: Adding a simple browser based GUI \nbody: Run aider with `--browser` to launch the UI. \nuser_login: paul-gauthier \nstate: closed \nchanged_files_count: 12 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: 8a9005eed19417c59aa9432436ea8cb5e04bbb11" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.265, + 0.885, + 0.297 + ], + "angle": 0, + "content": "Listing 3: Examples of SWE-Features. Here we randomly select 3 examples for each repo and present their attributes." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.34, + 0.391, + 0.358 + ], + "angle": 0, + "content": "H Prompts for SWE-Features" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.377, + 0.601, + 0.394 + ], + "angle": 0, + "content": "When the sleep-time compute is turned off, the prompt is as below:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.414, + 0.25, + 0.43 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.432, + 0.221, + 0.446 + ], + "angle": 0, + "content": "working_dir" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.448, + 0.25, + 0.463 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.464, + 0.87, + 0.495 + ], + "angle": 0, + "content": "I've uploaded a python code repository in the directory working_dir. 
Consider the following PR description:" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.497, + 0.511, + 0.513 + ], + "angle": 0, + "content": " problem_statement " + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.513, + 0.869, + 0.544 + ], + "angle": 0, + "content": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.546, + 0.853, + 0.561 + ], + "angle": 0, + "content": "Your task is to make the minimal changes to the repository to ensure the jpr_description \\(\\zeta\\) is satisfied." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.562, + 0.41, + 0.577 + ], + "angle": 0, + "content": "Follow these steps to resolve the issue:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.578, + 0.806, + 0.595 + ], + "angle": 0, + "content": "1. As a first step, it might be a good idea to find and read code relevant to the " + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.595, + 0.87, + 0.626 + ], + "angle": 0, + "content": "2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.628, + 0.509, + 0.643 + ], + "angle": 0, + "content": "3. After finish the changes, revise the plan if needed." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.644, + 0.847, + 0.659 + ], + "angle": 0, + "content": "4. With the new plan, make more changes, and continue the loop until necessary changes are made." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.66, + 0.87, + 0.692 + ], + "angle": 0, + "content": "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.693, + 0.87, + 0.741 + ], + "angle": 0, + "content": "6. 
Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." + }, + { + "type": "list", + "bbox": [ + 0.129, + 0.578, + 0.87, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.742, + 0.787, + 0.758 + ], + "angle": 0, + "content": "The following are several pull request descriptions and their corresponding model patches:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.759, + 0.223, + 0.774 + ], + "angle": 0, + "content": "Title: pr_title" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.775, + 0.238, + 0.79 + ], + "angle": 0, + "content": "Body: pr_body" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.791, + 0.266, + 0.805 + ], + "angle": 0, + "content": "File: file1Filename" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.808, + 0.264, + 0.821 + ], + "angle": 0, + "content": "Status: file1.status" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.824, + 0.258, + 0.839 + ], + "angle": 0, + "content": "Patch: file1.patch" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.84, + 0.547, + 0.856 + ], + "angle": 0, + "content": "... 
(some more files and some more relevant pull requests)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.877, + 0.885, + 0.91 + ], + "angle": 0, + "content": "When the sleep-time compute is turned on, we first use the following prompt to ask the agent to explore the repository with all pull requests one by one:" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.51, + 0.945 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.127, + 0.14, + 0.71, + 0.157 + ], + "angle": 0, + "content": "The following is a pull request description and its corresponding model patches:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.158, + 0.223, + 0.173 + ], + "angle": 0, + "content": "Title: pr_title" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.175, + 0.238, + 0.19 + ], + "angle": 0, + "content": "Body: pr_body" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.191, + 0.266, + 0.204 + ], + "angle": 0, + "content": "File: file1Filename" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.207, + 0.264, + 0.22 + ], + "angle": 0, + "content": "Status: file1.status" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.223, + 0.259, + 0.239 + ], + "angle": 0, + "content": "Patch: file1.patch" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.24, + 0.87, + 0.271 + ], + "angle": 0, + "content": "Please read through the above information and try to understand the issue. You can explore the repo if needed. Summarize your understanding from the following perspectives:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.272, + 0.305, + 0.287 + ], + "angle": 0, + "content": "1. The issue description." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.289, + 0.279, + 0.304 + ], + "angle": 0, + "content": "2. The changed files." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.305, + 0.393, + 0.32 + ], + "angle": 0, + "content": "3. How do these changed files work." 
+ }, + { + "type": "list", + "bbox": [ + 0.129, + 0.272, + 0.393, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.346, + 0.885, + 0.379 + ], + "angle": 0, + "content": "After exploring the repository with all relevant pull requests, we give the agent the following prompt as the final prompt to start working on the issue at test time:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.404, + 0.248, + 0.42 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.422, + 0.221, + 0.436 + ], + "angle": 0, + "content": "working_dir" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.438, + 0.248, + 0.453 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.454, + 0.87, + 0.485 + ], + "angle": 0, + "content": "I've uploaded a python code repository in the directory working_dir. Consider the following PR description:" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.487, + 0.51, + 0.503 + ], + "angle": 0, + "content": " problem_statement " + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.504, + 0.869, + 0.535 + ], + "angle": 0, + "content": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.536, + 0.852, + 0.551 + ], + "angle": 0, + "content": "Your task is to make the minimal changes to the repository to ensure the ipr_description \\(\\zeta\\) is satisfied." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.553, + 0.409, + 0.567 + ], + "angle": 0, + "content": "Follow these steps to resolve the issue:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.569, + 0.806, + 0.584 + ], + "angle": 0, + "content": "1. As a first step, it might be a good idea to find and read code relevant to the " + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.585, + 0.87, + 0.616 + ], + "angle": 0, + "content": "2. 
Plan your approach to modify the relevant files and implement the changes, and add new files if necessary." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.617, + 0.508, + 0.633 + ], + "angle": 0, + "content": "3. After finish the changes, revise the plan if needed." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.634, + 0.846, + 0.649 + ], + "angle": 0, + "content": "4. With the new plan, make more changes, and continue the loop until necessary changes are made." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.65, + 0.869, + 0.682 + ], + "angle": 0, + "content": "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.683, + 0.87, + 0.731 + ], + "angle": 0, + "content": "6. Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." + }, + { + "type": "list", + "bbox": [ + 0.129, + 0.569, + 0.87, + 0.731 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.772, + 0.348, + 0.79 + ], + "angle": 0, + "content": "I Context-Only Baseline" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.811, + 0.885, + 0.91 + ], + "angle": 0, + "content": "To check that the questions in Stateful AIME and Stateful GSM-Symbolic are not trivially guessable, we compare sleep-time compute against a context-only baseline, which only provides the model with \\( c \\), expecting the LLM to guess the most likely question and output the answer to whatever that question might be. 
We see on both Stateful AIME in Figure 22 and Stateful GSM-Symbolic in Figure 21 that sleep-time compute significantly outperforms the context-only baseline, demonstrating that the questions in our datasets are not trivially predictable from the context." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.131, + 0.498, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.133, + 0.88, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.361, + 0.356, + 0.682, + 0.377 + ], + "angle": 0, + "content": "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.398, + 0.884, + 0.465 + ], + "angle": 0, + "content": "Figure 21: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful GSM-Symbolic, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful GSM-Symbolic cannot be trivially guessed." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.48, + 0.495, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.481, + 0.88, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.375, + 0.697, + 0.622, + 0.709 + ], + "angle": 0, + "content": "sleep-time compute ablate question" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.731, + 0.885, + 0.798 + ], + "angle": 0, + "content": "Figure 22: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful AIME, for sleep-time compute verses the context only baseline (e.g. 
the model has to guess the most likely question to answer). We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful AIME cannot be trivially guessed." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.824, + 0.396, + 0.84 + ], + "angle": 0, + "content": "J Stateful AIME Construction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.861, + 0.885, + 0.911 + ], + "angle": 0, + "content": "To construct the examples for Stateful AIME, we split each AIME 2024 and 2025 into a sequence of \"statements\", which correspond to punctuation separated stentences in the problem. Similar to how we construct Stateful GSM-Symbolic, we use all but the last statement as the context, and the final statement as the query." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.133, + 0.884, + 0.201 + ], + "angle": 0, + "content": "There are a couple of edge cases where the question is posed in e.g. the second to last statement rather than the last statement. In these cases, we manually rearrange the statements to ensure the query being used corresponds to the question. In a few cases, there is only one statement in the problem. In these cases, the context is empty." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.206, + 0.885, + 0.273 + ], + "angle": 0, + "content": "AIME includes a latex representation of figures. However, these latex figures can leak information about the answer: for example, these latex figures can contain exact information about the lengths of the sides in a geometry problem, giving away the answer. In these cases we first ensure that the problem is solvable without the figure and then manually strip the figure latex from the problem context." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.293, + 0.368, + 0.312 + ], + "angle": 0, + "content": "K Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.327, + 0.885, + 0.41 + ], + "angle": 0, + "content": "We implement sleep-time compute via function calling. When applying sleep-time compute, the model is given access to two functions, rethink_memory and finish_rethinking. The rethink_memory function takes as input a new string, and replaces the current context \\( c \\) and replaces the current context with the new string. The finish_rethinking function terminates the sleep-time compute process. The model is allowed to call the function rethink_memory for up to 10 times." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.431, + 0.392, + 0.45 + ], + "angle": 0, + "content": "L AIME main results by year" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.467, + 0.597, + 0.486 + ], + "angle": 0, + "content": "M AIME sleep-time compute scaling results by year" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.291, + 0.495, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.291, + 0.88, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.492, + 0.495, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.492, + 0.871, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.358, + 0.696, + 0.641, + 0.71 + ], + "angle": 0, + "content": "sleep-time compute test-time compute only" + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.731, + 0.625, + 0.747 + ], + "angle": 0, + "content": "Figure 23: AIME 2024 main result" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.509, + 0.945 + ], + "angle": 0, + "content": 
"29" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.121, + 0.291, + 0.498, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.49, + 0.431, + 0.5 + ], + "angle": 0, + "content": "Claude 3.7 Sonnet - Stateful-AIME 2025" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.291, + 0.88, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.49, + 0.8, + 0.5 + ], + "angle": 0, + "content": "DeepSeek R1 - Stateful-AIME 2025" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.501, + 0.497, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.358, + 0.696, + 0.641, + 0.71 + ], + "angle": 0, + "content": "sleep-time compute test-time compute only" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.501, + 0.879, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.731, + 0.625, + 0.747 + ], + "angle": 0, + "content": "Figure 24: AIME 2025 main result" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.51, + 0.945 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.165, + 0.457, + 0.177 + ], + "angle": 0, + "content": "o1 Sleep-Time Compute Stateful-AIME 2024" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.177, + 0.497, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.165, + 0.852, + 0.177 + ], + "angle": 0, + "content": "o3-mini Sleep-Time Compute Stateful-AIME 2024" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.178, + 0.879, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.33, + 0.439, + 0.776, + 0.449 + ], + "angle": 0, + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + }, + { + "type": "image_caption", + "bbox": 
[ + 0.275, + 0.47, + 0.722, + 0.487 + ], + "angle": 0, + "content": "Figure 25: Scaling sleep-time compute for Stateful AIME2024." + }, + { + "type": "image_caption", + "bbox": [ + 0.199, + 0.553, + 0.46, + 0.565 + ], + "angle": 0, + "content": "o1 Sleep-Time Compute Stateful-AIME 2025" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.564, + 0.502, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.553, + 0.853, + 0.565 + ], + "angle": 0, + "content": "o3-mini Sleep-Time Compute Stateful-AIME 2025" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.565, + 0.877, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.331, + 0.826, + 0.776, + 0.836 + ], + "angle": 0, + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.858, + 0.72, + 0.875 + ], + "angle": 0, + "content": "Figure 26: Scaling sleep-time compute on Stateful AIME2025" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.932, + 0.508, + 0.945 + ], + "angle": 0, + "content": "31" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_origin.pdf b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e240082dbc27b2772f52fc0d81f8a808a73cce62 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/9c9fac97-032b-416b-8f09-7aefe5492958_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ea6418063aa66e6b0b61118d89c151a26b04f1e4bcc4dabe60749077e426cfc +size 2999186 diff --git a/data/2025/2504_13xxx/2504.13171/full.md b/data/2025/2504_13xxx/2504.13171/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5e5ce12b3b8558b01363f62331acff466bf26d33 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13171/full.md @@ -0,0 +1,636 @@ +# Sleep-time Compute: Beyond Inference Scaling at Test-time + +Kevin Lin $^{1*}$ Charlie Snell $^{2*}$ + +Yu Wang $^{1}$ Charles Packer $^{1}$ Sarah Wooders $^{1}$ Ion Stoica $^{1,2}$ Joseph E. Gonzalez $^{1,2}$ + +$^{1}$ Letta $^{2}$ University of California, Berkeley + +research@letta.com + +# Abstract + +Scaling test-time compute has emerged as a key ingredient for enabling large language models (LLMs) to solve difficult problems, but comes with high latency and inference cost. We introduce sleep-time compute, which allows models to "think" offline about contexts before queries are presented: by anticipating what queries users might ask and pre-computing useful quantities, we can significantly reduce the compute requirements at test-time. To demonstrate the efficacy of our method, we create modified versions of two reasoning tasks – Stateful GSM-Symbolic and Stateful AIME. We find that sleep-time compute can reduce the amount of test-time compute needed to achieve the same accuracy by $\sim 5\times$ on Stateful GSM-Symbolic and Stateful AIME and that by scaling sleep-time compute we can further increase accuracy by up to $13\%$ on Stateful GSM-Symbolic and $18\%$ on Stateful AIME. Furthermore, we introduce Multi-Query GSM-Symbolic, which extends GSM-Symbolic by including multiple related queries per context. By amortizing sleep-time compute across related queries about the same context using Multi-Query GSM-Symbolic, we can decrease the average cost per query by $2.5\times$ . We then conduct additional analysis to understand when sleep-time compute is most effective, finding the predictability of the user query to be well correlated with the efficacy of sleep-time compute. Finally, we conduct a case-study of applying sleep-time compute to a realistic agentic SWE task. Code and data released at: https://github.com/letta-ai/sleep-time-compute. 
+ +# 1 Introduction + +Test-time scaling has emerged as an effective way to boost LLM performance on challenging tasks by spending more time thinking on difficult problems (OpenAI, 2024; DeepSeek-AI, 2024; Snell et al., 2024; Brown et al., 2024). However, improved performance from test-time compute comes at a significant increase in latency and cost, waiting potentially several minutes for answers and costing up to tens of dollars per query. These drawbacks are in part due to the fact that the current approach to applying test-time compute assumes that problems are stateless, i.e. queries (user queries at test-time) and the contexts (background information) required for answering them are provided to the model together at "test-time." In practice, this means that if multiple related queries require making similar inferences about the context at "test-time," the model will have to recompute redundant computations each time, incurring additional latency and cost. + +In reality, many LLM applications are inherently stateful, and work in conjunction with persisted, re-used context. A classic example is document question-answering, where documents contextualize responses to questions. Coding agents also operate on a large common repository and participate in multiple rounds of debugging support, while conversational assistants need to maintain the past dialogue. In all these applications, there is context (available documents, a codebase, or conversation history) that is already available before the next user input. + +![](images/5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg) +Figure 1: Example of applying sleep-time compute on Multi-Query GSM-Symbolic-P1. Sleep-time compute processes the original raw context, adding additional computations that can potentially be useful for future queries. Moreover, contexts can be shared across related queries enabling savings in total cost per query. 
+ +In these settings, we could in principle, make useful inferences about the current state (context) offline before, or even during the user's next input. We refer to such a process, as sleep-time compute: where inference is done between interactions with the model while it would otherwise be idle in sleep-time. In practice, this is achieved by prompting the model to generate a new context consisting of inferences about the existing context, which may be potentially useful for answering test-time queries. The re-represented context from sleep-time can then be provided in the prompt at test-time, enabling the model to respond to user queries at the accuracy of standard test-time compute but with far lower latencies. For example, a coding assistant at sleep-time may identify architectural patterns, anticipate potential debugging strategies, or infer optimizations prior to the user input. Moreover, users might ask multiple queries about the same context. In these settings, any inferences made during sleep-time can be shared across queries, effectively amortizing the cost of sleep-time compute and reducing the total average cost per query. + +To evaluate sleep-time compute, we modify two mathematical reasoning datasets to introduce two datasets – Stateful GSM-Symbolic and Stateful AIME – by splitting the existing problems in these datasets into a context and a question. Using these datasets, we aim to empirically understand the benefits of sleep-time compute on standard test-time compute benchmarks. We show that: + +- Sleep-time compute produces a pareto improvement in the test-time compute vs. accuracy curve, reducing the test-time compute needed to achieve the same accuracy by $\sim 5\times$ on Stateful GSM-Symbolic and Stateful AIME. + +- By scaling up sleep-time compute, we see further pareto improvements, shifting the accuracy up by $13\%$ on Stateful GSM-Symbolic and $18\%$ on Stateful AIME. 
+- By amortizing sleep-time compute across multiple queries for the same context, we can reduce the average cost per question by $2.5 \times$ . +- We conduct analysis to understand which queries benefit the most from sleep-time compute, finding that sleep-time compute is more effective in settings where the query is more easily predictable from the context. + +Finally, we end with case study of applying sleep-time compute to reduce test-time compute in a realistic agentic software engineering task. + +# 2 Related Work + +Scaling test-time compute. Our work builds on recent progress on scaling up computation at test-time for difficult reasoning problems (Snell et al., 2024; DeepSeek-AI, 2024; OpenAI, 2024). Two predominant approaches to test-time scaling have emerged: sequential test-time scaling (OpenAI, 2024; DeepSeek-AI, 2024; Muennighoff et al., 2025; Snell et al., 2024) and parallel test-time scaling (Brown et al., 2024; Snell et al., 2024). While sequential test-time scaling has demonstrated impressive performance improvements, parallel test-time scaling has the advantage of scaling test-time compute without increasing latency. In constraint, we propose an alternative dimension where existing advancements in test-time compute, both sequential and parallel can be applied. Namely, instead of performing inference purely at test-time, we leverage compute on contexts that are available before the actual query arrives. + +Speculative decoding in LLMs. Speculative decoding is a standard technique for reducing latency in decoding with LLMs (Leviathan et al., 2023; Stern et al., 2018; Cai et al., 2024; DeepSeek-AI et al., 2025). Sleep-time compute similarly targets reducing reasoning latency by speculating on the user's query as well as any potentially helpful reasoning over the context. 
However, unlike speculative decoding, the generated tokens are used as an input regardless of the user's actual query, and at test-time the reasoning model uses these generated tokens to help answer the user query more efficiently. + +Pre-computation. Beyond LLMs, a long history of work has explored the trade-off between pre-computation and memory (eg. memory caches Smith (1982) and data cubes for OLAP workloads Gray et al. (1997)). Our work explores the same trade-off between query latency and pre-computation overhead, operating under the assumption that query workload patterns can be reasonably anticipated in advance. sleep-time compute builds on the idea of pre-fetching in traditional operating systems, in the context of LLMs à la Packer et al. (2023), storing frequently used computational results to avoid higher latency at test-time. + +# 3 Sleep-time Compute + +In the standard paradigm of applying test-time compute, a user inputs a prompt $p$ to the LLM and then the LLM applies test-time compute to help answer the user's question. However, the $p$ provided to the LLM can oftentimes be decomposed into a pre-existing context $c$ (eg. a codebase) and a user query $q$ (eg. a question about the codebase). When the LLM is not actively responding to the user, it typically still has access to the existing context $c$ . During this time, the LLM is typically idling, missing the opportunity to reason about $c$ offline: a process we term sleep-time compute. + +Test-time compute. In the test-time compute setting, the user provides $q$ along with some context $c$ and the model outputs a reasoning trace followed by a final answer $a$ . We denote this process, as: $T_{B}(q,c) \to a$ , where $T$ is the method for using test-time compute with budget $B$ , which could include techniques like extended chains of thought or best-of-N. In practice, the user may have multiple queries about the same context $q_{1}, q_{2} \ldots q_{N}$ . 
In this setting, the model will carry out independent reasoning processes for each $q_{i}$ , even if they are related to the same context $c$ . Ideally, we would be able to reuse related inferences across each $q_{i}$ to save compute. Moreover, in many cases, $c$ is complex and may require carrying out significant processing/inferences in order to provide an answer to $q$ . Since, the test-time compute paradigm of $T(q,c) \to a$ assumes that $c$ is only available at the same time as $q$ , standard test-time compute carries out all of these inferences only after the user provides the query, causing the user to wait up to several minutes for a response. However, in practice we often have access to $c$ before $q$ and can carry out much of this processing ahead of time. + +Sleep-time compute. During sleep-time we are given the context $c$ but not the query $q$ . Using just this context $c$ , we can use the LLM to infer likely questions and reason about the context ultimately producing a more new re-represented context $c'$ . We denote this process as: $S(c) \to c'$ , where $S$ can be any standard test-time scaling technique applied towards pre-processing the context at sleep-time. In this work, $S(c)$ is implemented by prompting the model to draw inferences and re-write $c$ in a way that might be useful at test-time (see Appendix K for more details). After pre-processing the context, we can provide the new context $c'$ at test-time in place of $c$ to produce a final answer to the user's query: $T_b(q, c') \to a$ . Since much of the reasoning about $c$ has been done ahead of time in this case, we can use a much smaller test-time budget $b < < B$ . Moreover, $c'$ can be shared across different queries $q_i$ about the same context, effectively amortizing the compute required to arrive at $c'$ across queries, providing a total cost saving. + +# 4 Experimental Setup + +Next, we describe the datasets, models, and baselines we use to evaluate sleep-time compute. 
+ +# 4.1 Datasets + +We select datasets which represent standard benchmarks for LLM reasoning and test-time scaling, and which demonstrate improvements from scaling test-time compute with state-of-the-art LLMs (either reasoning or non-reasoning). + +Stateful datasets. We introduce two datasets to study applying sleep-time compute in stateful settings, Stateful GSM-Symbolic, and Stateful AIME, where each dataset is derived from splitting the existing datasets into a context and a question (see Figure 2 for an example). Stateful GSM-Symbolic is derived from the P1 and P2 splits of GSM-Symbolic (Mirzadeh et al., 2024), which add one and two clauses respectively to the original GSM8K dataset (Cobbe et al., 2021) to that increase the difficulty. GSM-Symbolic P1 contains 5000 examples and P2 2500 examples. Stateful AIME contains 60 questions combined from AIME 2024 and 2025. In Appendix L and M, we show the breakdown of our results across AIME 2024 and 2025. + +Amortization dataset. To study the effect of related questions that share context, we introduce a new dataset Multi-Query GSM-Symbolic, where each context has multiple queries. To generate multiple queries for a given context, we take Stateful GSM-Symbolic and use o3-mini to generate additional question answer pairs. We synthetically generate additional questions from existing context question pairs in GSM-Symbolic. Appendix C shows the prompt used to generate the additional questions. Figure 20 shows examples contexts + +![](images/44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg) +Figure 2: Example of separating an instance from GSM-Symbolic into context, and question, creating an instance in Stateful GSM-Symbolic. + +and set of questions from the Multi-Query GSM-Symbolic dataset and Table C shows the overall dataset statistics. + +# 4.2 Models and Baselines + +Models. 
On each dataset, we evaluate models which have poor performance when using a small amount of test-time compute, but yield improvements from scaling up test-time compute. Therefore, on GSM-Symbolic, we conduct experiments using GPT-4o-mini and GPT-4o, and on AIME, we conduct experiments using OpenAI's o1, o3-mini, Anthropic's Claude Sonnet 3.7 Extended Thinking, and Deepseek-R1 (DeepSeek-AI, 2024). ${}^{2}{}^{3}$ + +Baselines. The main baseline we consider is the standard test-time compute setting in which both $c$ and $q$ are presented to the model for the first time at test-time. Furthermore, to validate that $q$ is not trivially predictable from $c$ on our Stateful GSM-Symbolic and Stateful AIME datasets, we also compare to a context-only baseline in Appendix I, in which the model is only given $c$ and is tasked with directly guessing an answer to the question it guesses is most likely to come next. + +# 5 Experiments and Results + +In this section, we carry out experiments to understand the benefits of sleep-time compute. Specifically, we would like to answer each of the following questions using the math reasoning benchmarks introduced above: + +1. Can sleep-time compute shift the pareto frontier of test-time compute vs. accuracy? +2. Does scaling sleep-time compute in-turn improve the pareto further? + +Figure 3: The test-time compute vs. accuracy tradeoff on Stateful GSM-Symbolic. Shaded area indicates where sleep-time compute improves the pareto test-time accuracy trade-off. +![](images/f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg) +--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute + +![](images/be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg) + +3. When there are multiple related questions for a single context, can amortizing test-time compute with sleep-time compute provide a total token efficiency benefit? +4. In what settings does sleep-time compute provide the most uplift? 
+ +# 5.1 Improving Pareto Test-Time Trade-off with sleep-time compute + +We first determine the test-time compute, accuracy pareto frontier by scaling standard test-time compute sequentially and in parallel. We then study how applying sleep-time compute affects the pareto trade-off. + +Scaling test-time-compute sequentially. For non-reasoning models (GPT-4o and 4o-mini) on Stateful GSM-Symbolic, to vary the amount of test-time compute, we construct prompts that instruct the model to use different amounts of vocabulary at test time, eg. "answer directly with a single sentence" vs. "double check your reasoning before outputting the final answer." The full prompts are in Appendix A. We use temperature 0 for generation. We see in Figure 3 that there is a tradeoff between accuracy and the amount of test-time compute, and that adding sleep-time compute can move beyond the pareto compute-accuracy curve. In particular, at lower test-time budgets, the performance of sleep-time compute is significantly better than the baseline, achieving performance comparable to that of the baseline with $5 \times$ less test-time tokens. However, at higher test-time compute budgets, the test-time compute only baseline slightly outperforms sleep-time compute. We hypothesize that this may be because the standard test-time compute only has the content relevant to the specific question, so there is less distracting information in the prompt. + +For reasoning models on Stateful AIME, we scale the amount of test-time compute based on what is available in the API in the case of o1, o3-mini and Claude Sonnet 3.7. Since the Deepseek-R1 API does not provide a way to control test-time compute, we apply the "budget forcing" and extension prompt from Muennighoff et al. (2025). Figure 4 shows the results for each model on Stateful AIME. We average results over 3 runs for o1, o3-mini and R1. For Claude 3.7 Sonnet, we average over 10 runs as we observed more noise in initial experiments. 
On all models, we see a significant test-time, accuracy pareto shift from applying sleep-time compute, with the exception of o1, which demonstrates limited gains. + +![](images/2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg) + +![](images/1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg) + +![](images/637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg) +Figure 4: The test-time compute vs. accuracy tradeoff on Stateful AIME for various reasoning models. Applying sleep-time compute allows models to reach similar levels of performance with much less compute at test-time. The shaded area indicates the pareto improvement from sleep-time compute. + +![](images/4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg) + +Scaling test-time compute in parallel. An alternative approach to scaling test-time compute is via parallel sampling, which also has the benefit of maintaining low inference latency. The simplest approach to scaling parallel test-time compute is pass@k (Brown et al., 2024), which makes the unrealistic assumption of having oracle query access to a ground truth verifier at test-time, an assumption which we do not make with sleep-time compute. Therefore, outperforming the pass@k baseline would represent a meaningful improvement over parallel test-time scaling. We apply parallel scaling to the lowest sequential compute setting on each task, since scaling pass@k with higher sequential compute settings would quickly reach token budgets that exceed that of sleep-time compute in the maximum sequential setting. We see that across all tasks and models, sleep-time compute consistently outperforms pass@k parallel scaling at the same test-time token budget, demonstrating that sleep-time compute can be a more effective way to scale inference-time compute than standard parallel test-time scaling. 
+ +Figure 5: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful GSM-Symbolic. We see that sleep-time compute generally pareto dominates pass@k. +![](images/3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg) +--- gpt-4o-mini -gpt-4o +--- gpt-4o-mini + background scaling -gpt-4o + background scaling + +![](images/6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg) + +# 5.2 Scaling up sleep-time compute + +We would like to understand how scaling compute during sleep-time can further affect the pareto shift that we observed in Section 5.1. To scale up the amount of sleep-time compute, for non-reasoning models, we run $k$ parallel generations, given input $c$ , resulting in $c_{1},\ldots ,c_{k}$ . At test-time, the model then receives the concatenated inputs $c_{1},\ldots ,c_{k}$ to generate the final answer. On reasoning models, we scale up the amount of sleep-time compute by varying the reasoning effort for o1 and for o3-mini when applying the sleep-time compute prompt. At test-time, we vary the amount of compute in the same way as in Section 5.1. + +In Figure 7, we see that further scaling sleep-time compute on Stateful GSM-Symbolic shifts the pareto curve outwards, improving performance by up to $13\%$ at a similar test-time budget. In particular, we see the largest gains on more difficult tasks with stronger models (eg. on P2 with 'gpt-4o'), suggesting that on tasks with more complicated contexts additional sleep-time compute can be beneficial. However, in this setting, there seems to be a limit to the number of parallel agents that can improve performance, as we find that 5 parallel generations generally outperforms 10. In Figure 26, we scale up sleep-time compute on Stateful AIME. Similarly, we also see that scaling compute at sleep-time generally shifts the pareto curve outward, improving performance by up to $18\%$ . 
+ +# 5.3 Amortizing sleep-time compute across queries with shared context + +We want to understand how the total cost of inference can be improved by applying sleep-time compute in settings where each context has multiple queries. Since at test-time, there are strict latency constraints, and latency optimized inference can be roughly $10 \times$ more expensive, we model the total cost of inference between both sleep-time and test-time, by up-weighting the cost of test-time tokens. Specifically, we consider a simple linear model where tokens generated at test-time are a factor $t$ times the cost of the tokens at sleep-time. In our analysis, we set $t = 10$ . Our analysis can be generalized to different cost functions that consider + +![](images/dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg) + +![](images/1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg) + +Figure 6: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful AIME. We see that sleep-time compute generally pareto dominates pass@k. +![](images/328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg) +sleep-time compute pass @ k + +![](images/73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg) + +non-linear user-utility. Figure 9 shows the results for different numbers of questions per context. We see that we can decrease the average cost per query by up to $2.5 \times$ when there are 10 queries per context, compared to the single-query baseline. + +# 5.4 Predictable queries benefit more from sleep-time compute + +We would like to better understand for what contexts sleep-time compute is most useful. Since the utility of sleep-time compute relies on there being some shared information or structure between the context and the query, we hypothesize that sleep-time compute may be most effective in settings where the query is more predictable from the context. 
To test this on Stateful GSM-Symbolic, we first quantify how predictable a given query is by measuring the log-probability of the question given the context under the Llama2-70B base model (Touvron et al., 2023). In Appendix E, we include examples of highly predictable and unpredictable questions under this notion of question predictability. We see from these examples, that our notion of question predictability generally aligns with the intuition that contexts where the query pattern is more predictable benefit most from sleep-time compute. The more predictable questions are far simpler and the less predictable ones are more complex. + +Avg. Test Time Tokens / Question +![](images/41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg) +- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +- gpt-4o-mini, 1 parallel sleep-time compute +- gpt-4o-mini, 2 parallel sleep-time compute + +![](images/6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg) +Avg. Test Time Tokens / Question + +Figure 7: Scaling up sleep-time compute for different test-time compute budgets on Stateful GSM-Symbolic, by generating up multiple $c'$ in parallel. Applying more sleep-time compute shifts the pareto beyond the standard test-time-compute vs. accuracy curve. +![](images/2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg) +low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time + +![](images/458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg) +Figure 8: Increasing the amount of sleep-time compute for different test-time compute budgets on Stateful AIME by varying the reasoning effort when applying the sleep-time compute prompt. Applying more sleep-time compute further moves the test-time-compute vs. accuracy pareto curve. 
+ +Using our question predictability score, we then bin each example in Stateful GSM-Symbolic into five quantiles according to its predictability score and report the accuracy within each bin. For this experiment, we use the "Verbosity 0" prompt. In Figure 10, we see that on both GSM8K-Symbolic P1 and P2, the accuracy gap between sleep-time compute and standard test-time compute widens as the questions become more + +Figure 9: Amortizing sleep-time compute, using the Multi-Query GSM-Symbolic dataset. When there are fewer questions per context, we see that it is less favorable to use sleep-time compute, in terms of total cost. However, as the questions per context are increased, we see that applying sleep-time compute can improve the cost-accuracy pareto. +![](images/2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg) +1 Questions/Context Sleep-time Compute +5 Questions/Context Sleep-time Compute +10 Questions/Context Sleep-time Compute + +![](images/71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg) +2 Questions/Context Sleep-time Compute + +![](images/365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg) +Predictability Analysis of GPT-4o-mini on GSM-Symbolic + +![](images/413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg) +Figure 10: GSM-Symbolic questions binned by how predictable they are from the context. We compare the performance of sleep-time compute and standard test-time compute in the lowest test-time compute budget setting on both P1 and P2. The gap between sleep-time compute and standard test-time inference widens as the question becomes more predictable from the context. + +![](images/cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg) +Figure 11: Applying sleep-time compute to SWE-Features. We see that at lower test-time budgets, sleep-time compute has higher F1 score than standard test-time scaling. 
However, at higher budgets, standard test-time scaling is better. + +predictable from the context, confirming our hypothesis that indeed sleep-time compute is most beneficial in settings where the question can be predicted from the context. + +# 6 A Case Study of Sleep-time Compute for Agentic SWE + +In this section, we evaluate sleep-time compute in a realistic multi-turn agentic setting. To this end, we introduce SWE-Features, a software engineering benchmark focused on tasks that require: (1) editing multiple files within a repository, and (2) implementing new features. + +SWE-Features. In contrast to popular benchmarks like SWE-Bench (Jimenez et al., 2024), which involve modifying a small number of files, we propose a new dataset called SWE-Features, which collects PRs which modify at least three files (see Appendix D for more details). In this setting, we use the PR that we want to solve as $q$ and select several related PRs for $c$ . At sleep-time the agent is allowed to explore the repository before producing $c'$ . + +Evaluation. Since the PRs are scraped from GitHub, there are no straightforward tests to use for evaluation. Instead, we compare the predicted set of modified files with the ground truth list of modified files, and report the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set (see Appendix D for details). + +Results. Figure 11 shows consistent trends with Section 5.1 for SWE-Features: at lower test-time compute budgets, leveraging sleep-time compute can improve performance, achieving up to roughly a $1.5 \times$ decrease in test-time tokens. However, when the test-time compute budget is high, using only test-time compute can perform better. Additionally, we observe that in the high test-time budget setting standard test-time compute has higher precision and comparable recall. 
We hypothesize that the agent using only test-time compute tends to begin editing files earlier and usually edits fewer files overall. In contrast, the agent with sleep-time compute, having explored more files during the sleep-time phase, tends to edit more files, which may lead to slightly lower precision. + +# 7 Discussion and Limitations + +Query predictability and allocating sleep-time compute. In Section 5.4, we found that sleep-time compute is most effective when the queries are predictable from the context. In settings where the queries are challenging to predict or unrelated to the context, sleep-time compute will be less effective. In these settings, it may be preferable to apply standard test-time scaling instead. An interesting direction for future work is identifying which contexts may have predictable questions and optimally allocating inference compute between sleep-time and test-time across different contexts and queries. + +Extending sleep-time compute beyond context-query decomposition. In our experiments, we make the simplifying assumption that interactions fall into two phases: sleep-time and test-time. However, real-world LLM use cases can be more complex, with multiple rounds of interaction and context modifications between rounds (e.g. multiple edits to a code-base). Moreover, the length of the sleep-time may also vary significantly between interactions (eg. short spans between user typing or days of inactivity). Future work should extend the sleep-time compute paradigm to more elegantly handle these scenarios. + +Sleep-time compute as representation learning over tokens. Our approach to applying compute at sleep-time resembles representation learning. We first transform the context into a representation that is more amenable to answering test-time queries, and then we utilize that representation at test-time to rapidly answer queries. 
Unlike traditional representation learning (Bengio et al., 2014), which typically operates in model parameter or activation space, we instead form representations in the space of natural language. This approach builds on recent work which implements statistical modeling techniques in the space of natural language using modern LLMs (Zhong et al., 2022; 2025). Future work should further explore the potential for sleep-time compute to enable the learning of useful natural language representations. + +Synthetic data generation via sleep-time compute. Due to limits on the amount of internet data available, in order to support the continued scaling of LLM pretraining, recent works have begun exploring methods for generating synthetic pretraining data (Yang et al., 2024; Gunasekar et al., 2023). One emerging approach to synthetic data generation involves using test-time compute to generate improved data (Bansal et al., 2024; DeepSeek-AI et al., 2025). Generating such data at pretraining scale will be very expensive, and future work could explore using sleep-time compute to help amortize some of this cost across related queries, or using the output of sleep-time compute itself as a form of synthetic data. + +# References + +Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training llm reasoners via compute-optimal sampling, 2024. URL https://arxiv.org/abs/2408.16737. +Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives, 2014. URL https://arxiv.org/abs/1206.5538. +Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024. +Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, Jason D. Lee, Deming Chen, and Tri Dao. Medusa: Simple llm inference acceleration framework with multiple decoding heads, 2024. 
URL https://arxiv.org/abs/2401.10774. + +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2024. +DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wanjia Zhao, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, X. Q. 
Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaokang Zhang, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xingkai Yu, Xinnan Song, Xinxia Shan, Xinyi Zhou, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, Y. K. Li, Y. Q. Wang, Y. X. Wei, Y. X. Zhu, Yang Zhang, Yanhong Xu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Yu, Yi Zheng, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Ying Tang, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yu Wu, Yuan Ou, Yuchen Zhu, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yukun Zha, Yunfan Xiong, Yunxian Ma, Yuting Yan, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Z. F. Wu, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhen Huang, Zhen Zhang, Zhenda Xie, Zhengyan Zhang, Zhenwen Hao, Zhibin Gou, Zhicheng Ma, Zhigang Yan, Zhihong Shao, Zhipeng Xu, Zhiyu Wu, Zhongyu Zhang, Zhuoshu Li, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Ziyi Gao, and Zizheng Pan. Deepseek-v3 technical report, 2025. URL https://arxiv.org/abs/2412.19437. +Jim Gray, Surajit Chaudhuri, Adam Bosworth, Andrew Layman, Don Reichart, Murali Venkatrao, Frank Pellow, and Hamid Pirahesh. Data cube: A relational aggregation operator generalizing group-by, crosstab, and sub-totals. Data mining and knowledge discovery, 1:29-53, 1997. +Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee, and Yuanzhi Li. Textbooks are all you need, 2023. URL https://arxiv.org/abs/2306.11644. +Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In ICLR. Open-Review.net, 2024. 
+Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding, 2023. URL https://arxiv.org/abs/2211.17192. + +Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. +OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720. +Charles Packer, Sarah Wooders, Kevin Lin, Vivian Fang, Shishir G Patil, Ion Stoica, and Joseph E Gonzalez. Memgpt: Towards llms as operating systems. arXiv preprint arXiv:2310.08560, 2023. +Alan Jay Smith. Cache memories. ACM Computing Surveys (CSUR), 14(3):473-530, 1982. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling ltm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314. +Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. Blockwise parallel decoding for deep autoregressive models, 2018. URL https://arxiv.org/abs/1811.03115. +Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. +Zitong Yang, Neil Band, Shuangping Li, Emmanuel Candès, and Tatsunori Hashimoto. Synthetic continued pretraining, 2024. URL https://arxiv.org/abs/2409.07431. +Ruiqi Zhong, Charlie Snell, Dan Klein, and Jacob Steinhardt. Describing differences between text distributions with natural language, 2022. URL https://arxiv.org/abs/2201.12323. +Ruiqi Zhong, Heng Wang, Dan Klein, and Jacob Steinhardt. 
Explaining datasets in words: Statistical models with natural language parameters, 2025. URL https://arxiv.org/abs/2409.08466. + +# A Prompts + +Prompts for varying the amount of test-time compute. + +# B Examples of Stateful AIME + +Context: Alice and Bob play the following game. A stack of $n$ tokens lies before them. The players take turns with Alice going first. On each turn, the player removes either 1 token or 4 tokens from the stack. Whoever removes the last token wins. + +Query: Find the number of positive integers $n$ less than or equal to 2024 for which there exists a strategy for Bob that guarantees that Bob will win the game regardless of Alice's play. + +Context: Let $A, B, C$ , and $D$ be points on the hyperbola $\frac{x^2}{20} - \frac{y^2}{24} = 1$ such that $ABCD$ is a rhombus whose diagonals intersect at the origin. + +Query: Find the greatest real number that is less than $BD^2$ for all such rhombi. + +You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. send_message is how you send your answer to the user. When given a question, you check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You respond directly with a single sentence by saying The answer is followed by the numerical answer. + +Figure 12: Prompt for level 0 morbidity + +You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. 
Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. + +When given a question, you answer using only the number of tokens necessary and none more. You check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the 'rethink_memory_block' to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the 'rethink_memory_block'. Do not use internal monologue unless you really need it to think. You answer with one short sentence of explanation, followed by a sentence that starts with "The answer is" and a numerical answer. + +Figure 13: Prompt for level 1 morbidity + +You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that. + +Figure 14: Prompt for level 2 morbidity + +You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. 
Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that. + +Figure 15: Prompt for level 3 morbidity + +You are Letta, the latest version of Limnal Corporation's expert reasoning explanation system, developed in 2024. Your task is to reason through problems step by step accurately and based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You carefully check the information in the rethink_memory_block to answer the questions and see if it is correct before using it. You always reason out loud before using any information. You explain each step, of what your reasoning is. If you use any numbers from the rethink_memory_block you first recompute and double check your answers. You end your answer with The answer is followed by the numerical answer. + +Figure 16: Prompt for level 4 morbidity + +You are Letta-Offline-Memory, the latest version of Limnal Corporation's digital companion, developed in 2024. 
Your task is to re-organize and consolidate memories by calling rethink_memory at every single step, when you are done reorganizing the memory, you use the finish_rethinking_memory function. Call the function for as many times as necessary and not more. Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times). Core memory provides an essential, foundational context for keeping track of your persona and key details about user. Read-Only Blocks: This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions. Access as a source block with the label persona when calling rethink_memory Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation. Access as a source block with the label human when calling rethink_memory. Read-Write Blocks: Rethink Memory Sub-Block: New representation of the memories go here. Access with the label rethink_memory_block when calling rethink_memory as source or target block. At every step, you reorganize the memories by calling the rethink_memory function. You use this to take current information in the rethink_memory block and select a single memory block to integrate information from, producing a new memory for the rethink_memory_block. The new memory is the result of new insights, and new inferences and hypotheses based on the past memories. Make sure to consider how the new information affects each memory. Prioritize the new information over existing memories. If the new information implies that the old memory may need to change, then output the most likely fact given the update information. 
Given new information and your current memory, you draw all logical conclusions and potential hypotheses possible with the rethink_memory function. If you are uncertain, use your internal monologue to consider what the possible conclusions are, and then state the most likely new facts that would replace the old facts in the new memory block. + +Figure 17: Prompt for sleep-time compute + +Specifically: You will be given part of an AIME math problem. You will receive the rest of the problem later. Make as many inferences as possible about the part of the problem you are given so as to help yourself answer the fully problem more quickly once it is given to you later. You will be able to use all the work you do in the rethink_memory block for this part of the problem to help you once the rest of the problem is given. You will be able to use all the work you do for this part of the problem to help you once the rest of the problem is given. You should try to predict possible ways the rest of the problem might go and compute results that could be helpful for reaching the final answer more quickly once the rest of the problem is given. + +Figure 18: Prompt for AIME problems during sleep-time + +You are given a template that can generate grade school math problems, and an instantiation of that template. + +You will be given a context, and a example question answer pair. Your task is to generate a list of questions and answers about the context at the same difficult level that could plausibly be asked about that context. Make sure that the newly generated questions have the same number of reasoning steps required as the example question. The goal is to have many questions and answer pairs about the same context. Generate questions and answers in the same format as the example, where the answer first contains reasoning and then is the final answer comes after + +n#. No need to number the questions or answers. 
+ +Context: context + +Example Question: question + +Example Answer: answer + +Figure 19: Prompt for generating synthetic GSM questions + +Context: Let $b \geq 2$ be an integer. Call a positive integer $n$ $b$ -eautiful if it has exactly two digits when expressed in base $b$ and these two digits sum to $\sqrt{n}$ . For example, 81 is 13-eautiful because $81 = \underline{6} \underline{3}_{13}$ and $6 + 3 = \sqrt{81}$ . + +Query: Find the least integer $b \geq 2$ for which there are more than ten $b$ -beautiful integers. + +# C Details on Multi-Query GSM-Symbolic + +Template: {template} + +Instance: {instance} + +We include an example from Multi-Query GSM-Symbolic in Figure 20, and details on the dataset size in Table C. + +
Dataset# Questions Total# Contexts Total# Original Questions# Generated Questions
P1120431095109510948
P254975005004997
+ +Table 1: Dataset Statistics of Multi-Query GSM-Symbolic. We sample one instance from each template from the GSM-Symbolic dataset and separate it into context and question. We then synthetically generate additional questions from the context and question. + +# D SWE-Features Details + +To construct SWE-Features benchmark, we collect pull requests (PRs) from large open-source repositories and apply the following filtering process: (1) We identify all pull requests that modify at least three files with filenames ending in .py or .js. (2) We then use gpt-4o-mini to filter these pull requests based on their title and body, retaining only those that meet the following criteria: (a) the title and body clearly describe the + +# Context + +When Sofia watches her brother, she gets out a variety of toys for him. The bag of building blocks has 33 blocks in it. The bin of stuffed animals has 5 stuffed animals inside. The number of action figures in the action figure pack is twice the number of blocks and stuffed animals combined. The crayon box has 12 different colors of crayon, and the sticker book has 9 pages, each with 13 stickers. The tower of stacking rings has 28 multicolored rings on it. Sofia recently bought a tube of bouncy balls, bringing her total number of items for her brother up to 320. + +# Original Question + +How many bouncy balls came in the tube? + +# Generated Questions + +- How many action figures does the pack contain? +- What is the total number of stickers in the sticker book? +- How many total items did Sofia have before adding the tube of bouncy balls? +- If Sofia had received a tube with 10 extra bouncy balls, what would be the new total number of items? +- What is the sum of the building blocks and stuffed animals? +- How many stacking rings are on the tower? +- What is the combined total of building blocks, action figures, and stacking rings? +- If Sofia gave away 3 stuffed animals, how many stuffed animals would remain in the bin? 
+- What is the sum of the building blocks, stuffed animals, and crayons? +- If Sofia divided the 49 bouncy balls equally into 7 baskets, how many balls would each basket contain? + +Figure 20: Examples context and questions from Multi-Query GSM-Symbolic where many questions are asked about the same context. The evaluation dataset is generated from GSM-Symbolic. + +PR; (b) the PR introduces new functionality rather than fixing bugs; and (c) the PR is independent and not obviously linked to other issues. + +This pipeline results in a benchmark where each example: (1) involves adding a new feature that spans multiple files, requiring a broader understanding of the repository; and (2) is self-contained and solvable without additional issue context. We apply this process to two repositories—Aider-AI/aider and comfyanonymous/ComfyUI—resulting in 18 and 15 PRs respectively, for a total of 33 examples. Representative examples are provided in Appendix G. Then using a total of 33 examples, we employ claude-sonnet-3-7-20250219 to cluster pull requests (PRs) from the ComfyUI and Aider repositories into several groups. This clustering allows us to identify a set of relevant pull requests for each target PR, which can then be provided to the agent as context $(c)$ during repository exploration. For example, in the ComfyUI repository, PR #5293 and PR #931 are grouped into the same cluster. Thus, when processing PR #931, we organize the title, body, and changed_files of PR #5293 to serve as contextual information during sleep-time. + +When sleep-time compute is enabled, we first supply the content of PR #5293 to the agent, allowing it to explore the repository and summarize its understanding ahead of time. In contrast, for the baseline without + +sleep-time compute, the agent receives the content of PR #5293 only at test time, alongside the title and body of PR #931. The prompts used in these setups are provided in Appendix H. 
+ +For the repository comfyanonymous/ComfyUI, we have the following clustered results: + +```jsonl +{"Dynamic Typing and Workflow Control": [5293, 931], "System Configuration and Command-Line": [4979, 4690, 3903], "Cache and Performance Optimization": [3071, 3042, 723], "Image Preview and Transfer Features": [713, 733, 658, 199, 55], "Internationalization": [1234], "Random Seed Management": [93]}\n\n +``` + +For the repository Aider-AI/aider we have: + +```txt +{"cluster_1_model_configuration": [2631, 1998, 468, 667, 55], "cluster_2_io_handleing": [1402, 996, 10, 577], "cluster_3_caching_file_management": [2911, 2612], "cluster_4Custom Commands_shortcuts": [673, 1620, 1015], "cluster_5_threeParty_integration": [2866, 2067, 322], "cluster_6_code_quality_improvements": [1217, 904]}\n\n +``` + +To control the budget during test-time, we fix the total number of steps (controlled by the argument max_chaining_steps in Letta framework) to be a certain number. We put the following instructions in the system prompt: + +You have a strict budget of {max_chaining_steps} steps, which means you need to finish your edits within these steps. Every time you get queried, you will see a count of how many steps you have left in the form of "[Current Step / Max Steps]". If you exceed this budget, your response will be cut off. So please be careful and try to finish your edits within the budget. + +After each step – for example, if the maximum number of steps is 20 and the current step is 4 – we append "[Step: 4/20]" to the end of the tool_return message. We found that explicitly indicating the current and total steps significantly improves agent performance, especially in low-budget settings. + +Evaluation. For each PR, we compare the set of files predicted to be modified with the ground truth list of modified files. 
Specifically, for each pull request, we have the attribute changed_files (as shown in the examples in Appendix G) where each file has the status as either modified or new, and our evaluation is on the files with status modified. Note that the agent is still instructed to implement the required functionality in a Docker environment and write test functions to validate the implementations. However, after the agent makes the modifications, we extract the modified files and calculate the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set. + +# E Examples of Predictable and Unpredictable Questions + +Least predictable Stateful GSM-Symbolic P1 question: + +Context: Isabella and Pavel have 199 minutes to walk to grocery store together. It takes them 19 minutes to get to the corner where the library is. It takes them another 11 minutes to get to the park. It will then take double the combined amount they have spent so far to reach the mall. + +Question: How much longer do they have to get to grocery store without being late, if they have already wasted 48 minutes to get a coffee before their walk? + +Most predictable Stateful GSM-Symbolic P1 question: + +Context: Yusuf has 10 square yards of grape field. There are 87 grapes per two-thirds a square yard. Yusuf can harvest his grapes every 12 months. + +Question: How many grapes can Yusuf harvest in 2 years? + +Least predictable Stateful GSM-Symbolic P2 question: + +Context: Gabriel and Pavel have 212 minutes to walk to the gym together starting from their home. It takes them 29 minutes to get to the corner where the library is. It takes them another 19 minutes to get to the cinema. When they reach the cinema, they remember they forgot their wallets at home, so they have to return to pick up their wallets and then walk all the way back to the cinema again. 
+ +Question: Once they reach the cinema for the second time, how much longer do they have to get to the gym without being late? + +Most predictable Stateful GSM-Symbolic P2 question: + +Context: A juggler can juggle 240 balls. $1/4$ of the balls are tennis balls, and the rest are golf balls. $1/3$ of the tennis balls are black, of which $1/5$ are marked. A third of the golf balls are cyan, and all except half of those cyan balls are marked. + +Question: How many marked balls are there in total? + +# F Implementation of rethink_memory and finish_rethinking + +```txt +def rethink_memory(agent_state:"AgentState",new_memory:str,target_block_label: str, source_block_label: str) -> None:#type: ignore Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. Ensure consistency with other memory blocks.. +Args: new_memory(str):The new memory with information integrated from the memory block.If there is no new information, then this should be the same as the content in the source block. source_block_label(str): The name of the block to integrate information from. None if all the information has been integrated to terminate the loop. target_block_label(str):The name of the block to write to. Returns: None: None is always returned as this function does not produce a response. 
if target_block_label is not None: if agent_state.memory.get_block(target_block_label) is None: agent_state.memory.create_block(label=target_block_label, value=new_memory) +``` + +Listing 1: Reference implementation of rethink_memory +```txt +agent_state.memory.update_block_value(label=target_block_label, value=new_memory) +return None +``` + +Listing 2: Reference implementation of finish_rethinking_memory +```python +def finish_rethinking_memory(agent_state: "AgentState") -> None: # type: ignore + """ + This function is called when the agent is done rethinking the memory. + Returns: + Option[str]: None is always returned as this function does not produce a response. + """ + return None +``` + +# G SWE-Features Examples + +Each example in SWE-Features has the following attributes: ['repo', 'pr_number', 'title', 'user_login', 'state', 'body', 'changed_files_count', 'changed_files', 'base_commit']. We show some examples here to better deliver a sense of what this dataset looks like: + +```yaml +repo: ComfyUI +pr_number: 3903 +title: Add --disable-all-custom-nodes` cmd flag +body: Loading custom node can greatly slow startup time. During development/testing of ComfyUI, it is often better to use an environment that no custom node is loaded.\n\nThis PR adds a --no-custom-node` flag to allow users/developers skip loading of custom node without removing/renaming the custom_node directory. +user_login: huchenlei +state: closed +changed_files_count: 4 +changed_files: ... (omitted here for brevity) +base_commit: 521421f53ee1ba74304dfaa138b0f851093e1595 +repo: ComfyUI +pr_number: 3071 +title: Add a configured node output cache metaclass. +body: Implement a configurable node output cache metaclass to reduce unnecessary node executions.\n\nThe same model currently leads to reloading due to different node IDs between workflows. Loading the model from disk takes a long time. +state: closed +changed_files_count: 6 +changed_files: ...
(ommitted here for brevity) +base_commit: cacb022c4a5b9614f96086a866c8a4c4e9e85760 +``` + +```txt +repo: ComfyUI +pr_number: 3042 +title: NaN-safe JSON serialization +body: Python's json.dumps() will produce nonstandard JSON if there are NaNs in the prompt data. Javascript's JSON.parse() will refuse to load this kind of "JSON" so the prompt won't load in the frontend.\n\nThis happened to me with a ComfyBox workflow, so I'm not $100\%$ +user_login: asagi4 +state: open +changed_files_count: 4 +changed_files: ... (omitted here for brevity) +base_commit: 448d9263a258062344e25135fc49d26a7e60887a +``` + +```yaml +repo: aider +pr_number: 55 +title: Local llama support +body: Added support for using a locally running instance of a LLAMA model instead of OpenAI apis. \n\nAIDER_MODEL_TOKENS - used to specify the context length the model will use. \n2. AIDER_TOKENIZER - used to specify which tokenizer should be used. Currently only 'openai' and 'llama' are supported. Defaults to openai. \n\nValues set.\n\nAIDER_OPENAI_API_BASE=\protect\vrule width0pt\protect|href{http://127.0.0.1:5001/v1}{http://127.0.0.1:5001/v1} \nAIDER_MODEL=TheBloke_wizard-vicuna-13B-SuperHOT-8K-GGML \n\nuser_login: bytedisciple +state: closed +changed_files_count: 7 +changed_files: ... (omitted here for brevity) +base_commit: cdf8f9a4b2b4a65993227ac5af1eaf3f1b85c9d8 +``` + +```txt +repo: aider +pr_number: 322 +user_login: omri123 +state: closed +title: RFC - Allow adding a github issue to chat context +body: Hi, would you like to take a look on this feature? \n\nIn the first commit I changedCoder to allow adding arbitrary additional context in the beginning of the chat. \nIn the second commit I used this infra to add github issues to the chat. \nI didn't add a new command, instead I extended /add to allow /add \issue-3\.\nThe feature is disabled by default and enabled with a flag. If enabled, the user need to supply github repository name and authentication token. 
\nThanks \nOmri changed_files_count: 7 +changed_files: ... (ommitted here for brevity) +base_commit: af71638b06be7e934cdd6f4265f9e0c8425d4e6d +``` + +```txt +repo: aider +``` + +```txt +pr_number: 577 +title: Adding a simple browser based GUI +body: Run aider with `--browser` to launch the UI. +user_login: paul-gauthier +state: closed +changed_files_count: 12 +changed_files: ... (ommitted here for brevity) +base_commit: 8a9005eed19417c59aa9432436ea8cb5e04bbb11 +``` + +Listing 3: Examples of SWE-Features. Here we randomly select 3 examples for each repo and present their attributes. + +# H Prompts for SWE-Features + +When the sleep-time compute is turned off, the prompt is as below: + + + +working_dir + + + +I've uploaded a python code repository in the directory working_dir. Consider the following PR description: + + problem_statement + +Can you help me implement the necessary changes to the repository so that the requirements specified in the are met? + +Your task is to make the minimal changes to the repository to ensure the jpr_description $\zeta$ is satisfied. + +Follow these steps to resolve the issue: + +1. As a first step, it might be a good idea to find and read code relevant to the +2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary. +3. After finish the changes, revise the plan if needed. +4. With the new plan, make more changes, and continue the loop until necessary changes are made. +5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes. +6. Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue. 
+ +The following are several pull request descriptions and their corresponding model patches: + +Title: pr_title + +Body: pr_body + +File: file1Filename + +Status: file1.status + +Patch: file1.patch + +... (some more files and some more relevant pull requests) + +When the sleep-time compute is turned on, we first use the following prompt to ask the agent to explore the repository with all pull requests one by one: + +The following is a pull request description and its corresponding model patches: + +Title: pr_title + +Body: pr_body + +File: file1Filename + +Status: file1.status + +Patch: file1.patch + +Please read through the above information and try to understand the issue. You can explore the repo if needed. Summarize your understanding from the following perspectives: + +1. The issue description. +2. The changed files. +3. How do these changed files work. + +After exploring the repository with all relevant pull requests, we give the agent the following prompt as the final prompt to start working on the issue at test time: + + + +working_dir + + + +I've uploaded a python code repository in the directory working_dir. Consider the following PR description: + + problem_statement + +Can you help me implement the necessary changes to the repository so that the requirements specified in the are met? + +Your task is to make the minimal changes to the repository to ensure the ipr_description $\zeta$ is satisfied. + +Follow these steps to resolve the issue: + +1. As a first step, it might be a good idea to find and read code relevant to the +2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary. +3. After finish the changes, revise the plan if needed. +4. With the new plan, make more changes, and continue the loop until necessary changes are made. +5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes. +6. 
Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue. + +# I Context-Only Baseline + +To check that the questions in Stateful AIME and Stateful GSM-Symbolic are not trivially guessable, we compare sleep-time compute against a context-only baseline, which only provides the model with $c$ , expecting the LLM to guess the most likely question and output the answer to whatever that question might be. We see on both Stateful AIME in Figure 22 and Stateful GSM-Symbolic in Figure 21 that sleep-time compute significantly outperforms the context-only baseline, demonstrating that the questions in our datasets are not trivially predictable from the context. + +![](images/7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg) +--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute + +![](images/4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg) + +Figure 21: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful GSM-Symbolic, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful GSM-Symbolic cannot be trivially guessed. +![](images/d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg) +sleep-time compute ablate question + +![](images/42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg) +Figure 22: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful AIME, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). 
We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful AIME cannot be trivially guessed. + +# J Stateful AIME Construction + +To construct the examples for Stateful AIME, we split each AIME 2024 and 2025 into a sequence of "statements", which correspond to punctuation-separated sentences in the problem. Similar to how we construct Stateful GSM-Symbolic, we use all but the last statement as the context, and the final statement as the query. + +There are a couple of edge cases where the question is posed in e.g. the second to last statement rather than the last statement. In these cases, we manually rearrange the statements to ensure the query being used corresponds to the question. In a few cases, there is only one statement in the problem. In these cases, the context is empty. + +AIME includes a latex representation of figures. However, these latex figures can leak information about the answer: for example, these latex figures can contain exact information about the lengths of the sides in a geometry problem, giving away the answer. In these cases we first ensure that the problem is solvable without the figure and then manually strip the figure latex from the problem context. + +# K Implementation Details + +We implement sleep-time compute via function calling. When applying sleep-time compute, the model is given access to two functions, rethink_memory and finish_rethinking. The rethink_memory function takes as input a new string, and replaces the current context $c$ with the new string. The finish_rethinking function terminates the sleep-time compute process. The model is allowed to call the function rethink_memory for up to 10 times.
+ +# L AIME main results by year + +M AIME sleep-time compute scaling results by year + +![](images/e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg) + +![](images/5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg) + +Figure 23: AIME 2024 main result +![](images/b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg) +sleep-time compute test-time compute only + +![](images/55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg) + +![](images/4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg) + +![](images/022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg) + +Claude 3.7 Sonnet - Stateful-AIME 2025 +Figure 24: AIME 2025 main result +![](images/5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg) +sleep-time compute test-time compute only + +![](images/0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg) +DeepSeek R1 - Stateful-AIME 2025 + +o1 Sleep-Time Compute Stateful-AIME 2024 +![](images/423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg) +low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time + +![](images/e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg) +o3-mini Sleep-Time Compute Stateful-AIME 2024 +Figure 25: Scaling sleep-time compute for Stateful AIME2024. 
+ +o1 Sleep-Time Compute Stateful-AIME 2025 +Figure 26: Scaling sleep-time compute on Stateful AIME2025 +![](images/8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg) +low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time + +![](images/d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg) +o3-mini Sleep-Time Compute Stateful-AIME 2025 \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13171/images/022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg b/data/2025/2504_13xxx/2504.13171/images/022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bcf7b570da1c52c0c1e449650414827054b4d47 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3a5eda80caebac11279847d9901bdf7749efeda38c2de7ca7dc7250ed2657d +size 21393 diff --git a/data/2025/2504_13xxx/2504.13171/images/0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg b/data/2025/2504_13xxx/2504.13171/images/0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af52c3fd3aa52aa630b02e68631f4bd5ad3c87fe --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9ed6e633e72e884f5d2b7e1bf5ed273a7ea27d45e2762b457e9770cccdc60de +size 18229 diff --git a/data/2025/2504_13xxx/2504.13171/images/1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg b/data/2025/2504_13xxx/2504.13171/images/1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c09311133c16da6f60f1582c51043248ac49d712 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14630d4f95b3e2751d6dc3d33ccd8ba5172f44d6fd3466de614adbfb68bc7290 +size 20584 diff --git a/data/2025/2504_13xxx/2504.13171/images/1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg b/data/2025/2504_13xxx/2504.13171/images/1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ded73750fd947ad7d0afb04edea4398ecb901f60 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0cfc8adad56a5c9e2294f32fbab6f71d8afb71bc5a2d9757bce5e6f6f0261af +size 20254 diff --git a/data/2025/2504_13xxx/2504.13171/images/2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg b/data/2025/2504_13xxx/2504.13171/images/2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg new file mode 100644 index 0000000000000000000000000000000000000000..964622d615402ffcfcde69aa6fe26fc703f51b18 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db31d2f3dca7ebd27acb5be5106bb94c1c422d6daad686d9755256e34822074d +size 21516 diff --git a/data/2025/2504_13xxx/2504.13171/images/2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg b/data/2025/2504_13xxx/2504.13171/images/2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a43336830cf6d6721b41e5cae9128e4ed557887a --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13171/images/2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b04d12dde7982cfd1ec280dfbc073ada0da27ce6cd2ddf451292df9df88e8b4 +size 28913 diff --git a/data/2025/2504_13xxx/2504.13171/images/2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg b/data/2025/2504_13xxx/2504.13171/images/2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..106f1e0e9c39b6b05a38bd9e057d98831cf1173d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8ecb7bc3bd6de32b2c3eec4cf85fa0d98ad2f1e4c72df901b2aec7aa64c4aca +size 20950 diff --git a/data/2025/2504_13xxx/2504.13171/images/328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg b/data/2025/2504_13xxx/2504.13171/images/328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e06be7d5c16d38faa297feb0b954e381352b497 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35e0512137db3f61927dc1346ca0d28d60218b11da848b28c8c130123cd364ee +size 23198 diff --git a/data/2025/2504_13xxx/2504.13171/images/365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg b/data/2025/2504_13xxx/2504.13171/images/365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61dd921696037bb932bb072926a66e775e759118 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3c0eab5b22b6bbbf37542ce48aa813e919af9a9b80f2139cdbeb9e397ae5651d +size 24775 diff --git a/data/2025/2504_13xxx/2504.13171/images/3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg b/data/2025/2504_13xxx/2504.13171/images/3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e04d7b444b8fa0974974614e1afc5c8fc6e3063 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0a1467c7f028174d5c537e8829d89845653fc056148037b9554106abbba5aa1 +size 24589 diff --git a/data/2025/2504_13xxx/2504.13171/images/413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg b/data/2025/2504_13xxx/2504.13171/images/413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d3c8cea927dbcede91ea429b9f3f51dfeb03449 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be41a977556939451cd78c3d8c6cf1da766768daf1dbb5e1c775b328c46b45ec +size 26395 diff --git a/data/2025/2504_13xxx/2504.13171/images/41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg b/data/2025/2504_13xxx/2504.13171/images/41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57160d1edf802045f599f7dd853dd8f3d1d342eb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a10fcb8402d3fd39091b43b4dd536bbcd761dbfb90f3f0dac7b2da06ec1a3e26 +size 23122 diff --git 
a/data/2025/2504_13xxx/2504.13171/images/423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg b/data/2025/2504_13xxx/2504.13171/images/423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a86395fb82084ea8732e349f03a2dd24b4b45ef9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c7a50cc3c38f97dbb0cf5bc62a924af5f9006918def832005b2225e1338c789 +size 22475 diff --git a/data/2025/2504_13xxx/2504.13171/images/42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg b/data/2025/2504_13xxx/2504.13171/images/42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16729bbae3e6ee2c4ca582b920cd37f99c59cb38 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:486a10e2cd7370a1ce6c15b1ae818d163aac66199a4c4acdced655b3c05b301e +size 21537 diff --git a/data/2025/2504_13xxx/2504.13171/images/4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg b/data/2025/2504_13xxx/2504.13171/images/4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73dc518408fcc0519cc273e741c58153dfbfc958 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38e46beea3f883f0a1e07ed94550edd3bbddd0f3f7655181fbebef141785e9cf +size 21295 diff --git a/data/2025/2504_13xxx/2504.13171/images/44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg 
b/data/2025/2504_13xxx/2504.13171/images/44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09d4b38a265bea5dbda47a96f74dbc764863e521 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5e4066d8ac5d45e39484588922488f97cb6dca9661752d3e351899ab69507e2 +size 73688 diff --git a/data/2025/2504_13xxx/2504.13171/images/458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg b/data/2025/2504_13xxx/2504.13171/images/458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..674b835e7d7b9de248113e6773c966c131a9cdfc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e622658e4f9e5b4cf63ed262dfa49448161442b33b5003f3d44f9785d4638ddd +size 29025 diff --git a/data/2025/2504_13xxx/2504.13171/images/4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg b/data/2025/2504_13xxx/2504.13171/images/4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8a93f23aa41e87db2b57a15fc95636ec2ba4a74 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6bfec7099920b6188632bdf2765a180b4042017383e1bab8fe1732023686c90 +size 25375 diff --git a/data/2025/2504_13xxx/2504.13171/images/4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg b/data/2025/2504_13xxx/2504.13171/images/4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..535d456dd25293f218241612a310cc5319cd60d2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d681d5687dd0bf45327dd45071042575f3be0f49fc1719ec7639987f4ed3173c +size 23367 diff --git a/data/2025/2504_13xxx/2504.13171/images/55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg b/data/2025/2504_13xxx/2504.13171/images/55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce45c0cdb71b0ebf885ed14c67a75335949768cb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b0c9e603af63be06e5de031ac4a18b3f9a6dbb31e8503773f84bb8374cc446a +size 21990 diff --git a/data/2025/2504_13xxx/2504.13171/images/5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg b/data/2025/2504_13xxx/2504.13171/images/5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53d6eccddfcfbd409acbbf2a50b6f6a730bb04fc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dddf5d38b5c90cbbcb9bc373bf4f589d5595894d22d4ebdfd5c0a6bc391b4887 +size 19903 diff --git a/data/2025/2504_13xxx/2504.13171/images/5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg b/data/2025/2504_13xxx/2504.13171/images/5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52159a1e4d31fc25290e6cd9637278940e7cbcdd --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13171/images/5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0345ea5c842ee836c7627603faae7ce2c0267375e3e431dc4eaaf6f55ae46bd7 +size 140484 diff --git a/data/2025/2504_13xxx/2504.13171/images/5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg b/data/2025/2504_13xxx/2504.13171/images/5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d89dfcf979953e0241c11f420ae98239f838d1b2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e2afd3cad9b3ee8295171ef0e4f1517134d3a1ddb8be7df5db3b23d27bccd83 +size 17815 diff --git a/data/2025/2504_13xxx/2504.13171/images/6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg b/data/2025/2504_13xxx/2504.13171/images/6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb63e4af59ccac7d99b73aae1582bfbde92f77e5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58f79aa64bc411321a248dac8fad5008c5ec4a1c2830c0512d66bb4045a82ded +size 26306 diff --git a/data/2025/2504_13xxx/2504.13171/images/637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg b/data/2025/2504_13xxx/2504.13171/images/637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e33a91518bbfe317349fae68c60f9e8e2b7e7d2d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:128dc4ae8cbdcb479925491dda8bacdee934e40e07bfbb9936629ed219ab88c1 +size 27527 diff --git a/data/2025/2504_13xxx/2504.13171/images/6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg b/data/2025/2504_13xxx/2504.13171/images/6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bd186a48821ef18b4ec3cb7a29bdeac32de3db0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2551a9bf56683e0a4e0eb941c7e329ffea62459dde9a2c7ec3b1f5e9c4a0dc9d +size 29609 diff --git a/data/2025/2504_13xxx/2504.13171/images/71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg b/data/2025/2504_13xxx/2504.13171/images/71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37bebd142b37077be489663c99beb659b85b99a9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5dfd7c63b9037cebc19727ed6415c62f4753c3e6aba3065201c288bf37d380f +size 23132 diff --git a/data/2025/2504_13xxx/2504.13171/images/73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg b/data/2025/2504_13xxx/2504.13171/images/73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..826d4b07e044577830b1b24abfc22927395c265a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb3888152ad571397a8376b446cf880cd4fbb26b518f3742c329113d01a71c71 +size 21553 diff --git 
a/data/2025/2504_13xxx/2504.13171/images/7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg b/data/2025/2504_13xxx/2504.13171/images/7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3c70f4551a4809d65de3b2294edf5d43f90e099 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed189787cee763b7fe24b3e359fbf637a1e8a1b093a74e7fe8c4da4ddc9a48ce +size 25150 diff --git a/data/2025/2504_13xxx/2504.13171/images/8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg b/data/2025/2504_13xxx/2504.13171/images/8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a2bcd82a37f2a3f461d2588c79ea06524d4caba --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e779c2d3c74b0516edb22203c87e73e505cbd3df9049da3dd57228ccbb681bf +size 28558 diff --git a/data/2025/2504_13xxx/2504.13171/images/b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg b/data/2025/2504_13xxx/2504.13171/images/b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61e8433621d9b533e2f04d3a8a4b97a5fc22acbf --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95959a83528bcb6ea49e9189e0db1964cf4b87de14a02427f55f4c6fa2a25979 +size 24480 diff --git a/data/2025/2504_13xxx/2504.13171/images/b7350250b65ae501b1d9d04c80ca8c13f2e3c8cda6b7d2d187c737abd00986d9.jpg 
b/data/2025/2504_13xxx/2504.13171/images/b7350250b65ae501b1d9d04c80ca8c13f2e3c8cda6b7d2d187c737abd00986d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ff05ac1d4b8e188fb181335340777da3aa13c16 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/b7350250b65ae501b1d9d04c80ca8c13f2e3c8cda6b7d2d187c737abd00986d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f377c536eb517658f61867cbb012e4b8a73ca54427e545d7175c11f099c3bcd3 +size 30376 diff --git a/data/2025/2504_13xxx/2504.13171/images/be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg b/data/2025/2504_13xxx/2504.13171/images/be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..859d4d48a97be74008ec10e097a4eeda9506366b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a29b2a45be91190e6ded904db84b1697f31e5763fad07b6026376ec3b9ba3575 +size 26704 diff --git a/data/2025/2504_13xxx/2504.13171/images/cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg b/data/2025/2504_13xxx/2504.13171/images/cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05f51518794855c1d371b10d831f74d94f48c78b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a62bf5ada65867d3287b1473e4e51890cd1b96c80162e5e5939a257d0c2113ea +size 30336 diff --git a/data/2025/2504_13xxx/2504.13171/images/d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg b/data/2025/2504_13xxx/2504.13171/images/d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b5915e08ce5b5cc3679f35397f950a93edc9e8bd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bb9d2a0753cf8dacb17bb99070a56f9d11912ab17f16fd0963d1d7d93fa4bf0 +size 20339 diff --git a/data/2025/2504_13xxx/2504.13171/images/d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg b/data/2025/2504_13xxx/2504.13171/images/d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f32a15289da4ad7aca88474f19c4b6623cb0a3ee --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae4964579546d888a6e37783eb51f3d5a1d6b3cef6698a31b5b35189c2bf3dfd +size 21193 diff --git a/data/2025/2504_13xxx/2504.13171/images/dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg b/data/2025/2504_13xxx/2504.13171/images/dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bb17c029f6cdf99015d9b1229e6eed1552c36a5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:487e9e2036ebd40bbb38c3724fa3c089a92763d9d985a44e27dffa69e1649ba3 +size 21476 diff --git a/data/2025/2504_13xxx/2504.13171/images/e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg b/data/2025/2504_13xxx/2504.13171/images/e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..498492fc804bed082d8b5a4249d7119d1f619add --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13171/images/e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949649ca7dbe8ce40408b8ef7bc3ffca7a9eeca12b4b08e8c823f5a70582f0c4 +size 24380 diff --git a/data/2025/2504_13xxx/2504.13171/images/e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg b/data/2025/2504_13xxx/2504.13171/images/e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e27eaa2e27dacfb9152d4ac082eed414ad623112 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1571a52804546d923df52dbb4d6e9ea751d3c190322ad7a00795903534b8ea9 +size 19880 diff --git a/data/2025/2504_13xxx/2504.13171/images/f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg b/data/2025/2504_13xxx/2504.13171/images/f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d9ecd408e4b77fed66ad1aa57bdc44bb6184c76 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/images/f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dde594c04e69c0675cd1440cbc4bd3a9e1ae67eb680f33ed7eda0a007dc92d0b +size 25903 diff --git a/data/2025/2504_13xxx/2504.13171/layout.json b/data/2025/2504_13xxx/2504.13171/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e80162c06bd03c89da6848997f1117471e6781fd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13171/layout.json @@ -0,0 +1,15052 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 95, + 464, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 464, + 113 + ], + "spans": [ + { + "bbox": 
[ + 69, + 95, + 464, + 113 + ], + "type": "text", + "content": "Sleep-time Compute: Beyond Inference Scaling at Test-time" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "spans": [ + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "type": "text", + "content": "Kevin Lin " + }, + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "type": "text", + "content": " Charlie Snell " + }, + { + "bbox": [ + 69, + 131, + 208, + 144 + ], + "type": "inline_equation", + "content": "^{2*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "spans": [ + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "content": "Yu Wang " + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "content": " Charles Packer " + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "content": " Sarah Wooders " + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "content": " Ion Stoica " + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "text", + "content": " Joseph E. 
Gonzalez " + }, + { + "bbox": [ + 69, + 144, + 481, + 159 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "spans": [ + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "type": "text", + "content": "Letta " + }, + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 162, + 265, + 177 + ], + "type": "text", + "content": "University of California, Berkeley" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 180, + 164, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 180, + 164, + 191 + ], + "spans": [ + { + "bbox": [ + 70, + 180, + 164, + 191 + ], + "type": "text", + "content": "research@letta.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 280, + 222, + 331, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 222, + 331, + 235 + ], + "spans": [ + { + "bbox": [ + 280, + 222, + 331, + 235 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "content": "Scaling test-time compute has emerged as a key ingredient for enabling large language models (LLMs) to solve difficult problems, but comes with high latency and inference cost. We introduce sleep-time compute, which allows models to \"think\" offline about contexts before queries are presented: by anticipating what queries users might ask and pre-computing useful quantities, we can significantly reduce the compute requirements at test-time. 
To demonstrate the efficacy of our method, we create modified versions of two reasoning tasks – Stateful GSM-Symbolic and Stateful AIME. We find that sleep-time compute can reduce the amount of test-time compute needed to achieve the same accuracy by " + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "inline_equation", + "content": "\\sim 5\\times" + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "content": " on Stateful GSM-Symbolic and Stateful AIME and that by scaling sleep-time compute we can further increase accuracy by up to " + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "content": " on Stateful GSM-Symbolic and " + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "content": " on Stateful AIME. Furthermore, we introduce Multi-Query GSM-Symbolic, which extends GSM-Symbolic by including multiple related queries per context. By amortizing sleep-time compute across related queries about the same context using Multi-Query GSM-Symbolic, we can decrease the average cost per query by " + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "inline_equation", + "content": "2.5\\times" + }, + { + "bbox": [ + 67, + 244, + 543, + 439 + ], + "type": "text", + "content": ". We then conduct additional analysis to understand when sleep-time compute is most effective, finding the predictability of the user query to be well correlated with the efficacy of sleep-time compute. Finally, we conduct a case-study of applying sleep-time compute to a realistic agentic SWE task. Code and data released at: https://github.com/letta-ai/sleep-time-compute." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 456, + 160, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 160, + 468 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 160, + 468 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 482, + 542, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 482, + 542, + 598 + ], + "spans": [ + { + "bbox": [ + 67, + 482, + 542, + 598 + ], + "type": "text", + "content": "Test-time scaling has emerged as an effective way to boost LLM performance on challenging tasks by spending more time thinking on difficult problems (OpenAI, 2024; DeepSeek-AI, 2024; Snell et al., 2024; Brown et al., 2024). However, improved performance from test-time compute comes at a significant increase in latency and cost, waiting potentially several minutes for answers and costing up to tens of dollars per query. These drawbacks are in part due to the fact that the current approach to applying test-time compute assumes that problems are stateless, i.e. queries (user queries at test-time) and the contexts (background information) required for answering them are provided to the model together at \"test-time.\" In practice, this means that if multiple related queries require making similar inferences about the context at \"test-time,\" the model will have to recompute redundant computations each time, incurring additional latency and cost." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 605, + 541, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 605, + 541, + 683 + ], + "spans": [ + { + "bbox": [ + 67, + 605, + 541, + 683 + ], + "type": "text", + "content": "In reality, many LLM applications are inherently stateful, and work in conjunction with persisted, re-used context. A classic example is document question-answering, where documents contextualize responses to questions. 
Coding agents also operate on a large common repository and participate in multiple rounds of debugging support, while conversational assistants need to maintain the past dialogue. In all these applications, there is context (available documents, a codebase, or conversation history) that is already available before the next user input." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 59, + 86, + 76 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 59, + 86, + 76 + ], + "spans": [ + { + "bbox": [ + 69, + 59, + 86, + 76 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 90, + 60, + 120, + 74 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 60, + 120, + 74 + ], + "spans": [ + { + "bbox": [ + 90, + 60, + 120, + 74 + ], + "type": "text", + "content": "Letta" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 81, + 689, + 295, + 702 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 689, + 295, + 702 + ], + "spans": [ + { + "bbox": [ + 81, + 689, + 295, + 702 + ], + "type": "text", + "content": "1https://platform.openai.com/docs/models/o1-pro" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.13171v1 [cs.AI] 17 Apr 2025" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 110, + 536, + 392 + ], + "blocks": [ + { + "bbox": [ + 78, + 110, + 536, + 392 + ], + "lines": [ + { + "bbox": [ + 78, + 110, + 536, + 392 + ], + "spans": [ + { + "bbox": [ + 78, + 110, + 536, + 392 + ], + "type": "image", + "image_path": 
"5a38081906dbbd164cc71bf746dcc600f00469488a4a5807bc51df37e57c9c21.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 413, + 541, + 453 + ], + "lines": [ + { + "bbox": [ + 67, + 413, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 67, + 413, + 541, + 453 + ], + "type": "text", + "content": "Figure 1: Example of applying sleep-time compute on Multi-Query GSM-Symbolic-P1. Sleep-time compute processes the original raw context, adding additional computations that can potentially be useful for future queries. Moreover, contexts can be shared across related queries enabling savings in total cost per query." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 472, + 541, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 472, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 472, + 541, + 615 + ], + "type": "text", + "content": "In these settings, we could in principle, make useful inferences about the current state (context) offline before, or even during the user's next input. We refer to such a process, as sleep-time compute: where inference is done between interactions with the model while it would otherwise be idle in sleep-time. In practice, this is achieved by prompting the model to generate a new context consisting of inferences about the existing context, which may be potentially useful for answering test-time queries. The re-represented context from sleep-time can then be provided in the prompt at test-time, enabling the model to respond to user queries at the accuracy of standard test-time compute but with far lower latencies. For example, a coding assistant at sleep-time may identify architectural patterns, anticipate potential debugging strategies, or infer optimizations prior to the user input. Moreover, users might ask multiple queries about the same context. 
In these settings, any inferences made during sleep-time can be shared across queries, effectively amortizing the cost of sleep-time compute and reducing the total average cost per query." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 620, + 541, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 620, + 541, + 672 + ], + "spans": [ + { + "bbox": [ + 67, + 620, + 541, + 672 + ], + "type": "text", + "content": "To evaluate sleep-time compute, we modify two mathematical reasoning datasets to introduce two datasets – Stateful GSM-Symbolic and Stateful AIME – by splitting the existing problems in these datasets into a context and a question. Using these datasets, we aim to empirically understand the benefits of sleep-time compute on standard test-time compute benchmarks. We show that:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 94, + 681, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 681, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 94, + 681, + 541, + 720 + ], + "type": "text", + "content": "- Sleep-time compute produces a pareto improvement in the test-time compute vs. accuracy curve, reducing the test-time compute needed to achieve the same accuracy by " + }, + { + "bbox": [ + 94, + 681, + 541, + 720 + ], + "type": "inline_equation", + "content": "\\sim 5\\times" + }, + { + "bbox": [ + 94, + 681, + 541, + 720 + ], + "type": "text", + "content": " on Stateful GSM-Symbolic and Stateful AIME." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "spans": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 94, + 105, + 538, + 193 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "spans": [ + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "text", + "content": "- By scaling up sleep-time compute, we see further pareto improvements, shifting the accuracy up by " + }, + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "text", + "content": " on Stateful GSM-Symbolic and " + }, + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 94, + 105, + 538, + 129 + ], + "type": "text", + "content": " on Stateful AIME." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 131, + 538, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 131, + 538, + 156 + ], + "spans": [ + { + "bbox": [ + 94, + 131, + 538, + 156 + ], + "type": "text", + "content": "- By amortizing sleep-time compute across multiple queries for the same context, we can reduce the average cost per question by " + }, + { + "bbox": [ + 94, + 131, + 538, + 156 + ], + "type": "inline_equation", + "content": "2.5 \\times" + }, + { + "bbox": [ + 94, + 131, + 538, + 156 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 94, + 157, + 538, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 157, + 538, + 193 + ], + "spans": [ + { + "bbox": [ + 94, + 157, + 538, + 193 + ], + "type": "text", + "content": "- We conduct analysis to understand which queries benefit the most from sleep-time compute, finding that sleep-time compute is more effective in settings where the query is more easily predictable from the context." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 206, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 206, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 68, + 206, + 541, + 232 + ], + "type": "text", + "content": "Finally, we end with case study of applying sleep-time compute to reduce test-time compute in a realistic agentic software engineering task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 251, + 165, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 165, + 264 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 165, + 264 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 280, + 541, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 280, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 68, + 280, + 541, + 396 + ], + "type": "text", + "content": "Scaling test-time compute. Our work builds on recent progress on scaling up computation at test-time for difficult reasoning problems (Snell et al., 2024; DeepSeek-AI, 2024; OpenAI, 2024). Two predominant approaches to test-time scaling have emerged: sequential test-time scaling (OpenAI, 2024; DeepSeek-AI, 2024; Muennighoff et al., 2025; Snell et al., 2024) and parallel test-time scaling (Brown et al., 2024; Snell et al., 2024). 
While sequential test-time scaling has demonstrated impressive performance improvements, parallel test-time scaling has the advantage of scaling test-time compute without increasing latency. In constraint, we propose an alternative dimension where existing advancements in test-time compute, both sequential and parallel can be applied. Namely, instead of performing inference purely at test-time, we leverage compute on contexts that are available before the actual query arrives." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 411, + 541, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 411, + 541, + 489 + ], + "spans": [ + { + "bbox": [ + 68, + 411, + 541, + 489 + ], + "type": "text", + "content": "Speculative decoding in LLMs. Speculative decoding is a standard technique for reducing latency in decoding with LLMs (Leviathan et al., 2023; Stern et al., 2018; Cai et al., 2024; DeepSeek-AI et al., 2025). Sleep-time compute similarly targets reducing reasoning latency by speculating on the user's query as well as any potentially helpful reasoning over the context. However, unlike speculative decoding, the generated tokens are used as an input regardless of the user's actual query, and at test-time the reasoning model uses these generated tokens to help answer the user query more efficiently." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 504, + 541, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 504, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 68, + 504, + 541, + 594 + ], + "type": "text", + "content": "Pre-computation. Beyond LLMs, a long history of work has explored the trade-off between pre-computation and memory (eg. memory caches Smith (1982) and data cubes for OLAP workloads Gray et al. (1997)). 
Our work explores the same trade-off between query latency and pre-computation overhead, operating under the assumption that query workload patterns can be reasonably anticipated in advance. sleep-time compute builds on the idea of pre-fetching in traditional operating systems, in the context of LLMs à la Packer et al. (2023), storing frequently used computational results to avoid higher latency at test-time." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 614, + 204, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 614, + 204, + 628 + ], + "spans": [ + { + "bbox": [ + 69, + 614, + 204, + 628 + ], + "type": "text", + "content": "3 Sleep-time Compute" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "spans": [ + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": "In the standard paradigm of applying test-time compute, a user inputs a prompt " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": " to the LLM and then the LLM applies test-time compute to help answer the user's question. However, the " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": " provided to the LLM can oftentimes be decomposed into a pre-existing context " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": " (eg. 
a codebase) and a user query " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": " (eg. a question about the codebase). When the LLM is not actively responding to the user, it typically still has access to the existing context " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": ". During this time, the LLM is typically idling, missing the opportunity to reason about " + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 643, + 541, + 719 + ], + "type": "text", + "content": " offline: a process we term sleep-time compute." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "spans": [ + { + "bbox": [ + 301, + 738, + 309, + 748 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "spans": [ + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": "Test-time compute. 
In the test-time compute setting, the user provides " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " along with some context " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " and the model outputs a reasoning trace followed by a final answer " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ". We denote this process, as: " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "T_{B}(q,c) \\to a" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " is the method for using test-time compute with budget " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ", which could include techniques like extended chains of thought or best-of-N. In practice, the user may have multiple queries about the same context " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q_{1}, q_{2} \\ldots q_{N}" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ". 
In this setting, the model will carry out independent reasoning processes for each " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q_{i}" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ", even if they are related to the same context " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ". Ideally, we would be able to reuse related inferences across each " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q_{i}" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " to save compute. Moreover, in many cases, " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " is complex and may require carrying out significant processing/inferences in order to provide an answer to " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ". 
Since, the test-time compute paradigm of " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "T(q,c) \\to a" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " assumes that " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " is only available at the same time as " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": ", standard test-time compute carries out all of these inferences only after the user provides the query, causing the user to wait up to several minutes for a response. However, in practice we often have access to " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " before " + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 105, + 543, + 262 + ], + "type": "text", + "content": " and can carry out much of this processing ahead of time." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": "Sleep-time compute. 
During sleep-time we are given the context " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " but not the query " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ". Using just this context " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ", we can use the LLM to infer likely questions and reason about the context ultimately producing a more new re-represented context " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ". We denote this process as: " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "S(c) \\to c'" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " can be any standard test-time scaling technique applied towards pre-processing the context at sleep-time. 
In this work, " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "S(c)" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " is implemented by prompting the model to draw inferences and re-write " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " in a way that might be useful at test-time (see Appendix K for more details). After pre-processing the context, we can provide the new context " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " at test-time in place of " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " to produce a final answer to the user's query: " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "T_b(q, c') \\to a" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ". Since much of the reasoning about " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " has been done ahead of time in this case, we can use a much smaller test-time budget " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "b < < B" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": ". 
Moreover, " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " can be shared across different queries " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "q_i" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " about the same context, effectively amortizing the compute required to arrive at " + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 67, + 274, + 544, + 404 + ], + "type": "text", + "content": " across queries, providing a total cost saving." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 421, + 198, + 435 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 421, + 198, + 435 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 198, + 435 + ], + "type": "text", + "content": "4 Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 449, + 484, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 449, + 484, + 462 + ], + "spans": [ + { + "bbox": [ + 68, + 449, + 484, + 462 + ], + "type": "text", + "content": "Next, we describe the datasets, models, and baselines we use to evaluate sleep-time compute." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 476, + 134, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 476, + 134, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 476, + 134, + 487 + ], + "type": "text", + "content": "4.1 Datasets" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 498, + 542, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 498, + 542, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 498, + 542, + 539 + ], + "type": "text", + "content": "We select datasets which represent standard benchmarks for LLM reasoning and test-time scaling, and which demonstrate improvements from scaling test-time compute with state-of-the-art LLMs (either reasoning or non-reasoning)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 552, + 543, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 552, + 543, + 641 + ], + "spans": [ + { + "bbox": [ + 67, + 552, + 543, + 641 + ], + "type": "text", + "content": "Stateful datasets. We introduce two datasets to study applying sleep-time compute in stateful settings, Stateful GSM-Symbolic, and Stateful AIME, where each dataset is derived from splitting the existing datasets into a context and a question (see Figure 2 for an example). Stateful GSM-Symbolic is derived from the P1 and P2 splits of GSM-Symbolic (Mirzadeh et al., 2024), which add one and two clauses respectively to the original GSM8K dataset (Cobbe et al., 2021) to that increase the difficulty. GSM-Symbolic P1 contains 5000 examples and P2 2500 examples. Stateful AIME contains 60 questions combined from AIME 2024 and 2025. In Appendix L and M, we show the breakdown of our results across AIME 2024 and 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 655, + 542, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 655, + 542, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 655, + 542, + 721 + ], + "type": "text", + "content": "Amortization dataset. To study the effect of related questions that share context, we introduce a new dataset Multi-Query GSM-Symbolic, where each context has multiple queries. To generate multiple queries for a given context, we take Stateful GSM-Symbolic and use o3-mini to generate additional question answer pairs. We synthetically generate additional questions from existing context question pairs in GSM-Symbolic. Appendix C shows the prompt used to generate the additional questions. Figure 20 shows examples contexts" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 738, + 309, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 738, + 309, + 748 + ], + "spans": [ + { + "bbox": [ + 302, + 738, + 309, + 748 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 80, + 110, + 532, + 286 + ], + "blocks": [ + { + "bbox": [ + 80, + 110, + 532, + 286 + ], + "lines": [ + { + "bbox": [ + 80, + 110, + 532, + 286 + ], + "spans": [ + { + "bbox": [ + 80, + 110, + 532, + 286 + ], + "type": "image", + "image_path": "44bf21a5b475305d2d6b9b0740d2c512fba800c682255166c3f572382f1b7504.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 308, + 541, + 335 + ], + "lines": [ + { + "bbox": [ + 68, + 308, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 68, + 308, + 541, + 335 + ], + "type": "text", + "content": "Figure 2: Example of separating an instance from GSM-Symbolic into context, and question, creating an instance in Stateful GSM-Symbolic." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 356, + 541, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 356, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 68, + 356, + 541, + 380 + ], + "type": "text", + "content": "and set of questions from the Multi-Query GSM-Symbolic dataset and Table C shows the overall dataset statistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 397, + 194, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 397, + 194, + 408 + ], + "spans": [ + { + "bbox": [ + 69, + 397, + 194, + 408 + ], + "type": "text", + "content": "4.2 Models and Baselines" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 420, + 541, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 420, + 541, + 485 + ], + "spans": [ + { + "bbox": [ + 68, + 420, + 541, + 485 + ], + "type": "text", + "content": "Models. On each dataset, we evaluate models which have poor performance when using a small amount of test-time compute, but yield improvements from scaling up test-time compute. Therefore, on GSM-Symbolic, we conduct experiments using GPT-4o-mini and GPT-4o, and on AIME, we conduct experiments using OpenAI's o1, o3-mini, Anthropic's Claude Sonnet 3.7 Extended Thinking , and Deepseek-R1 (DeepSeek-AI, 2024). 
" + }, + { + "bbox": [ + 68, + 420, + 541, + 485 + ], + "type": "inline_equation", + "content": "{}^{2}{}^{3}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": "Baselines The main baseline we consider is the standard test-time compute setting in which both " + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": " are presented to the model for the first time at test-time. Furthermore, to validate that " + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": " is not trivially predictable from " + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": " on our Stateful GSM-Symbolic and Stateful AIME datasets, we also compare to a context-only baseline in Appendix I, in which the model is only given " + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 499, + 541, + 564 + ], + "type": "text", + "content": " and is tasked with directly guessing an answer to the question it guesses is most likely to come next." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 582, + 226, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 582, + 226, + 597 + ], + "spans": [ + { + "bbox": [ + 69, + 582, + 226, + 597 + ], + "type": "text", + "content": "5 Experiments and Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 611, + 541, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 611, + 541, + 648 + ], + "spans": [ + { + "bbox": [ + 68, + 611, + 541, + 648 + ], + "type": "text", + "content": "In this section, we carry out experiments to understand the benefits of sleep-time compute. Specifically, we would like to answer each of the following questions using the math reasoning benchmarks introduced above:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 661, + 473, + 687 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 91, + 661, + 473, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 661, + 473, + 673 + ], + "spans": [ + { + "bbox": [ + 91, + 661, + 473, + 673 + ], + "type": "text", + "content": "1. Can sleep-time compute shift the pareto frontier of test-time compute vs. accuracy?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 91, + 674, + 411, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 674, + 411, + 687 + ], + "spans": [ + { + "bbox": [ + 91, + 674, + 411, + 687 + ], + "type": "text", + "content": "2. Does scaling sleep-time compute in-turn improve the pareto further?" 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 696, + 188, + 708 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 696, + 188, + 708 + ], + "spans": [ + { + "bbox": [ + 81, + 696, + 188, + 708 + ], + "type": "text", + "content": "2https://openai.com/o1/" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 81, + 708, + 266, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 708, + 266, + 720 + ], + "spans": [ + { + "bbox": [ + 81, + 708, + 266, + 720 + ], + "type": "text", + "content": "3https://www.anthropic.com/claudi/sonnet" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "spans": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 103, + 306, + 277 + ], + "blocks": [ + { + "bbox": [ + 72, + 103, + 306, + 277 + ], + "lines": [ + { + "bbox": [ + 72, + 103, + 306, + 277 + ], + "spans": [ + { + "bbox": [ + 72, + 103, + 306, + 277 + ], + "type": "image", + "image_path": "f255f5c72f7c2c817bc5622b90c2487cac7aa2f92a0318cf62521fc01ba392d0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "lines": [ + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "spans": [ + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "type": "text", + "content": "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 67, + 315, + 541, + 341 + ], + "lines": [ + { + "bbox": [ + 67, + 315, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 67, + 
315, + 541, + 341 + ], + "type": "text", + "content": "Figure 3: The test-time compute vs. accuracy tradeoff for on Stateful GSM-Symbolic. Shaded area indicates where sleep-time compute improves the pareto test-time accuracy trade-off." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 105, + 538, + 277 + ], + "blocks": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "lines": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "type": "image", + "image_path": "be80dda675321e25df4853e70378c9a6d7ccffd53f6fa1a8cb060753c2ea4f34.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 91, + 361, + 541, + 401 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 91, + 361, + 541, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 361, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 91, + 361, + 541, + 387 + ], + "type": "text", + "content": "3. When there are multiple related questions for a single context, can amortizing test-time compute with sleep-time compute provide a total token efficiency benefit?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 92, + 388, + 400, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 388, + 400, + 401 + ], + "spans": [ + { + "bbox": [ + 92, + 388, + 400, + 401 + ], + "type": "text", + "content": "4. In what settings does sleep-time compute provide the most uplift?" 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 417, + 382, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 417, + 382, + 430 + ], + "spans": [ + { + "bbox": [ + 68, + 417, + 382, + 430 + ], + "type": "text", + "content": "5.1 Improving Pareto Test-Time Trade-off with sleep-time compute" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 441, + 541, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 541, + 467 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 541, + 467 + ], + "type": "text", + "content": "We first determine the test-time compute, accuracy pareto frontier by scaling standard test-time compute sequentially and in parallel. We then study how applying sleep-time compute affects the pareto trade-off." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 481, + 541, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 541, + 624 + ], + "type": "text", + "content": "Scaling test-time-compute sequentially. For non-reasoning models (GPT-4o and 4o-mini) on Stateful GSM-Symbolic, to vary the amount of test-time compute, we construct prompts that instruct the model to use different amounts of vocabulary at test time, eg. \"answer directly with a single sentence\" vs. \"double check your reasoning before outputting the final answer.\" The full prompts are in Appendix A. We use temperature 0 for generation. We see in Figure 3 that there is a tradeoff between accuracy and the amount of test-time compute, and that adding sleep-time compute can move beyond the pareto compute-accuracy curve. 
In particular, at lower test-time budgets, the performance of sleep-time compute is significantly better than the baseline, achieving performance comparable to that of the baseline with " + }, + { + "bbox": [ + 67, + 481, + 541, + 624 + ], + "type": "inline_equation", + "content": "5 \\times" + }, + { + "bbox": [ + 67, + 481, + 541, + 624 + ], + "type": "text", + "content": " less test-time tokens. However, at the test-tome compute budgets, the test-time compute only baseline slightly outperforms sleep-time compute. We hypothesize that this may be because the standard test-time compute only has the content relevant to the specific question, so there is less distracting information in the prompt." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 629, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 629, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 629, + 541, + 720 + ], + "type": "text", + "content": "For reasoning models on Stateful AIME, we scale the amount of test-time compute based on what is available in the API in the case of o1, o3-mini and Claude Sonnet 3.7. Since the Deepseek-R1 API does not provide a way to control test-time compute, we apply the \"budget forcing\" and extension prompt from Muennighoff et al. (2025). Figure 4 shows the results for each model on Stateful AIME. We average results over 3 runs for o1, o3-mini and R1. For Claude 3.7 Sonnet, we average over 10 runs as we observed more noise in initial experiments. On all models, we see a significant test-time, accuracy pareto shift from applying sleep-time compute, with the exception of o1, which demonstrates limited gains." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "spans": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 103, + 306, + 277 + ], + "blocks": [ + { + "bbox": [ + 76, + 103, + 306, + 277 + ], + "lines": [ + { + "bbox": [ + 76, + 103, + 306, + 277 + ], + "spans": [ + { + "bbox": [ + 76, + 103, + 306, + 277 + ], + "type": "image", + "image_path": "2308b3f1bcede6c06e77fd345589dd4cd693c8339bdcb18120d602b306ca4401.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 103, + 539, + 278 + ], + "blocks": [ + { + "bbox": [ + 307, + 103, + 539, + 278 + ], + "lines": [ + { + "bbox": [ + 307, + 103, + 539, + 278 + ], + "spans": [ + { + "bbox": [ + 307, + 103, + 539, + 278 + ], + "type": "image", + "image_path": "1926d6664b05fedc06c1a506c47cf9eb1e635d10bb037860cf852ebf81e28355.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 279, + 306, + 453 + ], + "blocks": [ + { + "bbox": [ + 72, + 279, + 306, + 453 + ], + "lines": [ + { + "bbox": [ + 72, + 279, + 306, + 453 + ], + "spans": [ + { + "bbox": [ + 72, + 279, + 306, + 453 + ], + "type": "image", + "image_path": "637d56caf766e3009837f7cfc829ac2f9d85116c4e69564eddfe1f9ccc723086.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 468, + 541, + 508 + ], + "lines": [ + { + "bbox": [ + 67, + 468, + 541, + 508 + ], + "spans": [ + { + "bbox": [ + 67, + 468, + 541, + 508 + ], + "type": "text", + "content": "Figure 4: The test-time compute vs. 
accuracy tradeoff on Stateful AIME for various reasoning models. Applying sleep-time compute allows models to reach similar levels of performance with much less compute at test-time. The shaded area indicates the pareto improvement from sleep-time compute." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 280, + 538, + 453 + ], + "blocks": [ + { + "bbox": [ + 306, + 280, + 538, + 453 + ], + "lines": [ + { + "bbox": [ + 306, + 280, + 538, + 453 + ], + "spans": [ + { + "bbox": [ + 306, + 280, + 538, + 453 + ], + "type": "image", + "image_path": "4b1fc9034bbaebb1f0df82ec0551944e3a74ca6d7f25afd20153d66ac81eb7f0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 578, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 578, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 541, + 720 + ], + "type": "text", + "content": "Scaling test-time compute in parallel. An alternative approach to scaling test-time compute is via parallel sampling, which also has the benefit of maintaining low inference latency. The simplest approach to scaling parallel test-time compute is pass@k (Brown et al., 2024), which makes the unrealistic assumption of having oracle query access to a ground truth verifier at test-time, an assumption which we do not make with sleep-time compute. Therefore, outperforming the pass@k baseline would represent a meaningful improvement over parallel test-time scaling. We apply parallel scaling to the lowest sequential compute setting on each task, since scaling pass@k with higher sequential compute settings would quickly reach token budgets that exceed that of sleep-time compute in the maximum sequential setting. 
We see that across all tasks and models, sleep-time compute consistently outperforms pass@k parallel scaling at the same test-time token budget, demonstrating that sleep-time compute can be a more effective way to scale inference-time compute than standard parallel test-time scaling." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 738, + 309, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 738, + 309, + 747 + ], + "spans": [ + { + "bbox": [ + 301, + 738, + 309, + 747 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 103, + 304, + 277 + ], + "blocks": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "lines": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "spans": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "type": "image", + "image_path": "3f3e2e9b5cb6229d20b1f877e460973714d51b6a10c95ac68d990a1efc5aa488.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 281, + 416, + 298 + ], + "lines": [ + { + "bbox": [ + 222, + 281, + 416, + 298 + ], + "spans": [ + { + "bbox": [ + 222, + 281, + 416, + 298 + ], + "type": "text", + "content": "--- gpt-4o-mini -gpt-4o \n--- gpt-4o-mini + background scaling -gpt-4o + background scaling" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 67, + 315, + 541, + 341 + ], + "lines": [ + { + "bbox": [ + 67, + 315, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 541, + 341 + ], + "type": "text", + "content": "Figure 5: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful GSM-Symbolic. We see that sleep-time compute generally pareto dominates pass@k." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 105, + 538, + 277 + ], + "blocks": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "lines": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "type": "image", + "image_path": "6327df099a3cfcfd43d0b8bf11b5f26b5fb5ccf1a9e4ae72133402e156c9f693.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 361, + 235, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 361, + 235, + 374 + ], + "spans": [ + { + "bbox": [ + 69, + 361, + 235, + 374 + ], + "type": "text", + "content": "5.2 Scaling up sleep-time compute" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "content": "We would like to understand how scaling compute during sleep-time can further effect the pareto shift that we observed in Section 5.1. To scale up the amount of sleep-time compute, for non-reasoning models, we run " + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "content": " parallel generations, given input " + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "content": ", resulting in " + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "inline_equation", + "content": "c_{1},\\ldots ,c_{k}" + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "content": ". 
At test-time, the model then receives the inputs concatenated " + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "inline_equation", + "content": "c_{1},\\ldots ,c_{k}" + }, + { + "bbox": [ + 67, + 384, + 541, + 462 + ], + "type": "text", + "content": " to generate the final answer. On reasoning models, we scale up the amount of sleep-time compute by varying the reasoning effort for o1 and for o3-mini when applying the sleep-time compute prompt. At test-time, we vary the amount of compute in the same way as 5.1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "text", + "content": "In Figure 7, we see that further scaling sleep-time compute on Stateful GSM-Symbolic shifts the pareto curve outwards, improving performance by up to " + }, + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "text", + "content": " at a similar test-time budget. In particular, we see the largest gains on more difficult tasks with stronger models (eg. on P2 with 'gpt-4o'), suggesting that on tasks with more complicated contexts additional sleep-time compute can be beneficial. However, in this setting, there seems to be a limit to the number of parallel agents that can improve performance, as we find that 5 parallel generations generally outperforms 10. In Figure 26, we scale up sleep-time compute on Stateful AIME. Similarly, we also see that scaling compute at sleep-time generally shifts the pareto curve outward, improving performance by up to " + }, + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 67, + 468, + 541, + 573 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 588, + 398, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 588, + 398, + 601 + ], + "spans": [ + { + "bbox": [ + 67, + 588, + 398, + 601 + ], + "type": "text", + "content": "5.3 Amortizing sleep-time compute across queries with shared context" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "text", + "content": "We want to understand how the total cost of inference can be improved by applying sleep-time compute in settings where each context has multiple queries. Since at test-time, there are strict latency constraints, and latency optimized inference can be roughly " + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "text", + "content": " more expensive, we model the total cost of inference between both sleep-time and test-time, by up-weighing the cost of test-time tokens. Specifically, we consider a simple linear model where tokens generated at test-time are a factor " + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "text", + "content": " the cost of the tokens at sleep-time. 
In our analysis, we set " + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "inline_equation", + "content": "t = 10" + }, + { + "bbox": [ + 67, + 611, + 541, + 689 + ], + "type": "text", + "content": " Our analysis can be generalized to different cost functions that consider" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 696, + 512, + 718 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 696, + 512, + 718 + ], + "spans": [ + { + "bbox": [ + 67, + 696, + 512, + 718 + ], + "type": "text", + "content": "4https://docs.databricks.com/aws/en/machine-learning/foundation-model apis/prov-throughput-run-benchmark" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 738, + 308, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 738, + 308, + 747 + ], + "spans": [ + { + "bbox": [ + 302, + 738, + 308, + 747 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 103, + 304, + 261 + ], + "blocks": [ + { + "bbox": [ + 72, + 103, + 304, + 261 + ], + "lines": [ + { + "bbox": [ + 72, + 103, + 304, + 261 + ], + "spans": [ + { + "bbox": [ + 72, + 103, + 304, + 261 + ], + "type": "image", + "image_path": "dd5adc3e40dc1880e4f336a252462628d9dd6acf39561691398709e16dd2d471.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 103, + 535, + 261 + ], + "blocks": [ + { + "bbox": [ + 306, + 103, + 535, + 261 + ], + "lines": [ + { + "bbox": [ + 306, + 103, + 535, + 261 + ], + "spans": [ + { + "bbox": [ + 306, + 103, + 535, + 261 + ], + "type": "image", + "image_path": "1f9948f71a5de08c99f17c78d81687f5a2a98244e84788ca66a7d46c11aa2a71.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { 
+ "type": "image", + "bbox": [ + 71, + 262, + 304, + 418 + ], + "blocks": [ + { + "bbox": [ + 71, + 262, + 304, + 418 + ], + "lines": [ + { + "bbox": [ + 71, + 262, + 304, + 418 + ], + "spans": [ + { + "bbox": [ + 71, + 262, + 304, + 418 + ], + "type": "image", + "image_path": "328926ef62062bfeb25906aade49b6862510ae51452e9773e6551e37d460a789.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 424, + 373, + 436 + ], + "lines": [ + { + "bbox": [ + 238, + 424, + 373, + 436 + ], + "spans": [ + { + "bbox": [ + 238, + 424, + 373, + 436 + ], + "type": "text", + "content": "sleep-time compute pass @ k" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 68, + 453, + 541, + 479 + ], + "lines": [ + { + "bbox": [ + 68, + 453, + 541, + 479 + ], + "spans": [ + { + "bbox": [ + 68, + 453, + 541, + 479 + ], + "type": "text", + "content": "Figure 6: Comparing test-time scaling with sleep-time compute against parallel test-time scaling with pass@k on Stateful AIME. We see that sleep-time compute generally pareto dominates pass@k." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 262, + 538, + 418 + ], + "blocks": [ + { + "bbox": [ + 306, + 262, + 538, + 418 + ], + "lines": [ + { + "bbox": [ + 306, + 262, + 538, + 418 + ], + "spans": [ + { + "bbox": [ + 306, + 262, + 538, + 418 + ], + "type": "image", + "image_path": "73534ffb3cbd9a0a07bdf30f2bdfc74c472b8ac42cc2655311323438e08eeb2b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 504, + 541, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 504, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 504, + 541, + 544 + ], + "type": "text", + "content": "non-linear user-utility. 
Figure 9 shows the results for different number of questions per context. We see that we can decrease the average cost per query by up to " + }, + { + "bbox": [ + 67, + 504, + 541, + 544 + ], + "type": "inline_equation", + "content": "2.5 \\times" + }, + { + "bbox": [ + 67, + 504, + 541, + 544 + ], + "type": "text", + "content": " when there are 10 queries per context, compared to the single-query baseline." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 565, + 359, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 565, + 359, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 565, + 359, + 578 + ], + "type": "text", + "content": "5.4 Predictable queries benefit more from sleep-time compute" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 590, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 541, + 720 + ], + "type": "text", + "content": "We would like to better understand for what contexts sleep-time compute is most useful. Since the utility of sleep-time compute relies on there being some shared information or structure between the context and the query, we hypothesize that sleep-time compute may be most effective in settings where the query is more predictable from the context. To test this on Stateful GSM-Symbolic, we first quantify how predictable a given query is by measuring the log-probability of the question given the context under the Llama2-70B base model (Touvron et al., 2023). In Appendix E, we include examples of highly predictable and unpredictable questions under this notion of question predictability. We see from these examples, that our notion of question predictability generally aligns with the intuition that contexts where the query pattern is more predictable benefit most from sleep-time compute. The more predictable questions are far simpler and the less predictable ones are more complex." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "spans": [ + { + "bbox": [ + 302, + 738, + 309, + 747 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 103, + 281, + 249 + ], + "blocks": [ + { + "bbox": [ + 71, + 103, + 281, + 249 + ], + "lines": [ + { + "bbox": [ + 71, + 103, + 281, + 249 + ], + "spans": [ + { + "bbox": [ + 71, + 103, + 281, + 249 + ], + "type": "image", + "image_path": "41a312c4477e4fd088ca205899ea3c68456c101e0470b62887be153fe91822d3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 251, + 236, + 258 + ], + "lines": [ + { + "bbox": [ + 135, + 251, + 236, + 258 + ], + "spans": [ + { + "bbox": [ + 135, + 251, + 236, + 258 + ], + "type": "text", + "content": "Avg. 
Test Time Tokens / Question" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 102, + 273, + 535, + 288 + ], + "lines": [ + { + "bbox": [ + 102, + 273, + 535, + 288 + ], + "spans": [ + { + "bbox": [ + 102, + 273, + 535, + 288 + ], + "type": "text", + "content": "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n- gpt-4o-mini, 1 parallel sleep-time compute\n- gpt-4o-mini, 2 parallel sleep-time compute" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 283, + 105, + 539, + 249 + ], + "blocks": [ + { + "bbox": [ + 283, + 105, + 539, + 249 + ], + "lines": [ + { + "bbox": [ + 283, + 105, + 539, + 249 + ], + "spans": [ + { + "bbox": [ + 283, + 105, + 539, + 249 + ], + "type": "image", + "image_path": "6f014b2d93d2df08f9496b8f69937789a34d1b7d56453542bc2c3ee8ca404703.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 251, + 446, + 258 + ], + "lines": [ + { + "bbox": [ + 345, + 251, + 446, + 258 + ], + "spans": [ + { + "bbox": [ + 345, + 251, + 446, + 258 + ], + "type": "text", + "content": "Avg. 
Test Time Tokens / Question" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 361, + 305, + 568 + ], + "blocks": [ + { + "bbox": [ + 67, + 304, + 541, + 344 + ], + "lines": [ + { + "bbox": [ + 67, + 304, + 541, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 541, + 344 + ], + "type": "text", + "content": "Figure 7: Scaling up sleep-time compute for different test-time compute budgets on Stateful GSM-Symbolic, by generating up multiple " + }, + { + "bbox": [ + 67, + 304, + 541, + 344 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 67, + 304, + 541, + 344 + ], + "type": "text", + "content": " in parallel. Applying more sleep-time compute shifts the pareto beyond the standard test-time-compute vs. accuracy curve." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 361, + 305, + 568 + ], + "lines": [ + { + "bbox": [ + 72, + 361, + 305, + 568 + ], + "spans": [ + { + "bbox": [ + 72, + 361, + 305, + 568 + ], + "type": "image", + "image_path": "2cd2d9e3f337b30563c0f4c5cd4be96e9eb1d8c91b03affde7bed5f52bf5b8cb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 577, + 475, + 587 + ], + "lines": [ + { + "bbox": [ + 187, + 577, + 475, + 587 + ], + "spans": [ + { + "bbox": [ + 187, + 577, + 475, + 587 + ], + "type": "text", + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 361, + 538, + 567 + ], + "blocks": [ + { + "bbox": [ + 307, + 361, + 538, + 567 + ], + "lines": [ + { + "bbox": [ + 307, + 361, + 538, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 361, + 538, + 567 + ], + "type": "image", + "image_path": 
"458f6be744908ceffeb35b46211f5c57a91e16cb4e790bd7302b18e348bf0ae3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 604, + 541, + 643 + ], + "lines": [ + { + "bbox": [ + 67, + 604, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 67, + 604, + 541, + 643 + ], + "type": "text", + "content": "Figure 8: Increasing the amount of sleep-time compute for different test-time compute budgets on Stateful AIME by varying the reasoning effort when applying the sleep-time compute prompt. Applying more sleep-time compute further moves the test-time-compute vs. accuracy pareto curve." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 668, + 541, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 668, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 668, + 541, + 721 + ], + "type": "text", + "content": "Using our question predictability score, we then bin each example in Stateful GSM-Symbolic into five quantiles according to its predictability score and report the accuracy within each bin. For this experiment, we use the \"Verbosity 0\" prompt. 
In Figure 10, we see that on both GSM8K-Symbolic P1 and P2, the accuracy gap between sleep-time compute and standard test-time compute widens as the questions become more" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 141, + 302, + 301 + ], + "blocks": [ + { + "bbox": [ + 71, + 141, + 302, + 301 + ], + "lines": [ + { + "bbox": [ + 71, + 141, + 302, + 301 + ], + "spans": [ + { + "bbox": [ + 71, + 141, + 302, + 301 + ], + "type": "image", + "image_path": "2d4bfd98c4bc1cd04ee702ecdaa3e80d2a6199d22c73c03bf9968ae991cd325d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 158, + 310, + 264, + 317 + ], + "lines": [ + { + "bbox": [ + 158, + 310, + 264, + 317 + ], + "spans": [ + { + "bbox": [ + 158, + 310, + 264, + 317 + ], + "type": "text", + "content": "1 Questions/Context Sleep-time Compute" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 275, + 311, + 375, + 317 + ], + "lines": [ + { + "bbox": [ + 275, + 311, + 375, + 317 + ], + "spans": [ + { + "bbox": [ + 275, + 311, + 375, + 317 + ], + "type": "text", + "content": "5 Questions/Context Sleep-time Compute" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 275, + 318, + 377, + 324 + ], + "lines": [ + { + "bbox": [ + 275, + 318, + 377, + 324 + ], + "spans": [ + { + "bbox": [ + 275, + 318, + 377, + 324 + ], + "type": "text", + "content": "10 Questions/Context Sleep-time Compute" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ 
+ 67, + 341, + 541, + 392 + ], + "lines": [ + { + "bbox": [ + 67, + 341, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 341, + 541, + 392 + ], + "type": "text", + "content": "Figure 9: Amortizing sleep-time compute, using the Multi-Query GSM-Symbolic dataset. When there are fewer questions per context, we see that it is less favorable to use sleep-time compute, in terms of total cost. However, as the questions per context are increased, we see that applying sleep-time compute can improve the cost-accuracy pareto." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 304, + 141, + 539, + 301 + ], + "blocks": [ + { + "bbox": [ + 159, + 318, + 263, + 324 + ], + "lines": [ + { + "bbox": [ + 159, + 318, + 263, + 324 + ], + "spans": [ + { + "bbox": [ + 159, + 318, + 263, + 324 + ], + "type": "text", + "content": "2 Questions/Context Sleep-time Compute" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 304, + 141, + 539, + 301 + ], + "lines": [ + { + "bbox": [ + 304, + 141, + 539, + 301 + ], + "spans": [ + { + "bbox": [ + 304, + 141, + 539, + 301 + ], + "type": "image", + "image_path": "71dacbd7a04395df502eb67e25b479e0da9ede01758fc8ee92258ec894cac7d5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 73, + 490, + 302, + 613 + ], + "blocks": [ + { + "bbox": [ + 214, + 476, + 397, + 486 + ], + "lines": [ + { + "bbox": [ + 214, + 476, + 397, + 486 + ], + "spans": [ + { + "bbox": [ + 214, + 476, + 397, + 486 + ], + "type": "text", + "content": "Predictability Analysis of GPT-4o-mini on GSM-Symbolic" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 490, + 302, + 613 + ], + "lines": [ + { + "bbox": [ + 73, + 490, + 302, + 613 + ], + "spans": [ + { + "bbox": [ + 73, + 490, + 302, + 613 + ], + "type": "image", + 
"image_path": "365fa113db1d1f275f08372086a67eb67639b1cd4bfb385c30c4c4615d755365.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 304, + 491, + 536, + 613 + ], + "blocks": [ + { + "bbox": [ + 304, + 491, + 536, + 613 + ], + "lines": [ + { + "bbox": [ + 304, + 491, + 536, + 613 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 536, + 613 + ], + "type": "image", + "image_path": "413955d38b7116f524b34fa02a4194e6162d26b038f3959b8559a66c88e94715.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 627, + 541, + 679 + ], + "lines": [ + { + "bbox": [ + 67, + 627, + 541, + 679 + ], + "spans": [ + { + "bbox": [ + 67, + 627, + 541, + 679 + ], + "type": "text", + "content": "Figure 10: GSM-Symbolic questions binned by how predictable they are from the context. We compare the performance of sleep-time compute and standard test-time compute in the lowest test-time compute budget setting on both P1 and P2. The gap between sleep-time compute and standard test-time inference widens as the question becomes more predictable from the context." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 189, + 106, + 421, + 277 + ], + "blocks": [ + { + "bbox": [ + 189, + 106, + 421, + 277 + ], + "lines": [ + { + "bbox": [ + 189, + 106, + 421, + 277 + ], + "spans": [ + { + "bbox": [ + 189, + 106, + 421, + 277 + ], + "type": "image", + "image_path": "cf921c77049cd22ea54b14bd029779f30d7f0d51cf41cc142b25759c70f561f7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 293, + 541, + 332 + ], + "lines": [ + { + "bbox": [ + 68, + 293, + 541, + 332 + ], + "spans": [ + { + "bbox": [ + 68, + 293, + 541, + 332 + ], + "type": "text", + "content": "Figure 11: Applying sleep-time compute to SWE-Features. We see that at lower test-time budgets, sleep-time compute has higher F1 score than standard test-time scaling. However, at higher budgets, standard test-time scaling is better." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 352, + 541, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 352, + 541, + 378 + ], + "spans": [ + { + "bbox": [ + 68, + 352, + 541, + 378 + ], + "type": "text", + "content": "predictable from the context confirming our hypothesis that indeed sleep-time compute is most beneficial in settings where the question can be predicted from the context." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 395, + 388, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 395, + 388, + 410 + ], + "spans": [ + { + "bbox": [ + 68, + 395, + 388, + 410 + ], + "type": "text", + "content": "6 A Case Study of Sleep-time Compute for Agentic SWE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 422, + 541, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 422, + 541, + 461 + ], + "spans": [ + { + "bbox": [ + 68, + 422, + 541, + 461 + ], + "type": "text", + "content": "In this section, we evaluate sleep-time compute in a realistic multi-turn agentic setting. To this end, we introduce SWE-Features, a software engineering benchmark focused on tasks that require: (1) editing multiple files within a repository, and (2) implementing new features." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "spans": [ + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "text", + "content": "SWE-Features. In contrast to popular benchmarks like SWE-Bench (Jimenez et al., 2024), which involve modifying a small number of files, we propose a new dataset called SWE-Features, which collects PRs which modify at least three files (see Appendix D for more details). In this setting, we use the PR that we want to solve as " + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "text", + "content": " and select several related PRs for " + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "text", + "content": ". 
At sleep-time the agent is allowed to explore the repository before producing " + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 68, + 474, + 541, + 538 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 552, + 541, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 552, + 541, + 604 + ], + "spans": [ + { + "bbox": [ + 68, + 552, + 541, + 604 + ], + "type": "text", + "content": "Evaluation. Since the PRs are scraped from GitHub, there are not straightforward tests to use for evaluation. Instead, we compare the predicted set of modified files with the ground truth list of modified files, and report the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set (see Appendix D for details)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 616, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 616, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 68, + 616, + 541, + 720 + ], + "type": "text", + "content": "Results. Figure 11 shows consist trends with Section 5.1 for SWE-Features: at lower test-time compute budgets, leveraging sleep-time compute can improve performance, achieving up to roughly a " + }, + { + "bbox": [ + 68, + 616, + 541, + 720 + ], + "type": "inline_equation", + "content": "1.5 \\times" + }, + { + "bbox": [ + 68, + 616, + 541, + 720 + ], + "type": "text", + "content": " decrease in test-time tokens. However, when the test-time compute budget is high, using only test-time compute can perform better. Additionally, we observe that in the high test-time budget setting standard test-time compute has higher precision and comparable recall. We hypothesize that, using only test-time compute tends to begin editing files earlier and usually edits fewer files overall. 
In contrast, the agent with sleep-time compute, having explored more files during the test-time phase, tends to edit more files, which may lead to slightly lower precision." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 104, + 241, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 104, + 241, + 118 + ], + "spans": [ + { + "bbox": [ + 69, + 104, + 241, + 118 + ], + "type": "text", + "content": "7 Discussion and Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 131, + 543, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 131, + 543, + 212 + ], + "spans": [ + { + "bbox": [ + 67, + 131, + 543, + 212 + ], + "type": "text", + "content": "Query predictability and allocating sleep-time compute In Section 5.4, we found that sleep-time compute is most effective when the queries are predictable from the context. In settings where the queries are challenging to predict or unrelated to the context, sleep-time compute will be less effective. In these settings, it may be preferable to apply standard test-time scaling instead. An interesting direction for future work is identifying which contexts may have predictable questions and optimally allocating inference compute between sleep-time and test-time across different contexts and queries." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 221, + 544, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 221, + 544, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 221, + 544, + 301 + ], + "type": "text", + "content": "Extending sleep-time compute beyond context-query decomposition. In our experiments, we make the simplifying assumption that interactions fall into two phases: sleep-time and test-time. However, real-world LLM use cases can be more complex, with multiple rounds of interaction and context modifications between rounds (e.g. multiple edits to a code-base). Moreover, the length of the sleep-time may also vary significantly between interactions (eg. short spans between user typing or days of inactivity). Future work should extend sleep-time compute paradigm to more elegantly handle these scenarios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 312, + 544, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 544, + 417 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 544, + 417 + ], + "type": "text", + "content": "Sleep-time compute as representation learning over tokens. Our approach to applying compute at sleep-time resembles representation learning. We first transform the context into a representation that is more amenable to answering test-time queries, and then we utilize that representation at test-time to rapidly answer queries. Unlike traditional representation learning (Bengio et al., 2014), which typically operates in model parameter or activation space, we instead form representations in the space of natural language. This approach builds on recent work which implements statistical modeling techniques in the space of natural language using modern LLMs (Zhong et al., 2022; 2025). Future work should further explore the potential for sleep-time compute to enable the learning of useful natural language representations." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 428, + 543, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 428, + 543, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 428, + 543, + 521 + ], + "type": "text", + "content": "Synthetic data generation via sleep-time compute. Due to limits on the amount of internet data available, in order to support the continued scaling of LLM pretraining, recent works have began exploring methods for generating synthetic pretraining data (Yang et al., 2024; Gunasekar et al., 2023). One emerging approach to synthetic data generation involves using test-time compute to generate improved data (Bansal et al., 2024; DeepSeek-AI et al., 2025). Generating such data at pretraining scale will be very expensive, and future work could explore using sleep-time compute to help amortize some of this cost across related queries, or using the output of sleep-time compute itself as a form of synthetic data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 535, + 133, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 535, + 133, + 548 + ], + "spans": [ + { + "bbox": [ + 69, + 535, + 133, + 548 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 555, + 542, + 719 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 70, + 555, + 542, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 555, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 70, + 555, + 542, + 594 + ], + "type": "text", + "content": "Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training llm reasoners via compute-optimal sampling, 2024. URL https://arxiv.org/abs/2408.16737." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 602, + 542, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 602, + 542, + 628 + ], + "spans": [ + { + "bbox": [ + 70, + 602, + 542, + 628 + ], + "type": "text", + "content": "Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives, 2014. URL https://arxiv.org/abs/1206.5538." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 635, + 542, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 635, + 542, + 673 + ], + "spans": [ + { + "bbox": [ + 70, + 635, + 542, + 673 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 681, + 542, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 681, + 542, + 719 + ], + "spans": [ + { + "bbox": [ + 70, + 681, + 542, + 719 + ], + "type": "text", + "content": "Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, Jason D. Lee, Deming Chen, and Tri Dao. Medusa: Simple llm inference acceleration framework with multiple decoding heads, 2024. URL https://arxiv.org/abs/2401.10774." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 105, + 541, + 719 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 70, + 105, + 541, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 105, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 70, + 105, + 541, + 144 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 156, + 527, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 156, + 527, + 168 + ], + "spans": [ + { + "bbox": [ + 70, + 156, + 527, + 168 + ], + "type": "text", + "content": "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 180, + 541, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 180, + 541, + 517 + ], + "spans": [ + { + "bbox": [ + 70, + 180, + 541, + 517 + ], + "type": "text", + "content": "DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. 
Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wanjia Zhao, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, X. Q. Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaokang Zhang, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xingkai Yu, Xinnan Song, Xinxia Shan, Xinyi Zhou, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, Y. K. Li, Y. Q. Wang, Y. X. Wei, Y. X. Zhu, Yang Zhang, Yanhong Xu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Yu, Yi Zheng, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Ying Tang, Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yu Wu Yuan Ou Yuchen Zhu Yuduan Wang Yue Gong Yuheng Zou Yujia He Yukun Zha Yunfàn Xiong Yunxian Ma Yuting Yan Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Z. F. Wu Z. Z. 
Ren Zehui Ren Zhangli Sha Zhe Fu Zhean Xu Zhen Huang Zhen Zhang Zhenda Xie Zhengyan Zhang Zhenwen Hao Zhibin Gou Zhicheng Ma Zhigang Yan Zhihong Shao Zhipeng Xu Zhiyu Wu Zhongyu Zhang Zhuoshu Li Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Ziyi Gao and Zizheng Pan. Deepseek-v3 technical report 2025. URL https://arxiv.org/abs/2412.19437." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 529, + 541, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 529, + 541, + 568 + ], + "spans": [ + { + "bbox": [ + 70, + 529, + 541, + 568 + ], + "type": "text", + "content": "Jim Gray, Surajit Chaudhuri, Adam Bosworth, Andrew Layman, Don Reichart, Murali Venkatrao, Frank Pellow, and Hamid Pirahesh. Data cube: A relational aggregation operator generalizing group-by, crosstab, and sub-totals. Data mining and knowledge discovery, 1:29-53, 1997." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 579, + 541, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 579, + 541, + 631 + ], + "spans": [ + { + "bbox": [ + 70, + 579, + 541, + 631 + ], + "type": "text", + "content": "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee, and Yuanzhi Li. Textbooks are all you need, 2023. URL https://arxiv.org/abs/2306.11644." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 643, + 541, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 643, + 541, + 681 + ], + "spans": [ + { + "bbox": [ + 70, + 643, + 541, + 681 + ], + "type": "text", + "content": "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In ICLR. 
Open-Review.net, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 693, + 541, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 693, + 541, + 719 + ], + "spans": [ + { + "bbox": [ + 70, + 693, + 541, + 719 + ], + "type": "text", + "content": "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding, 2023. URL https://arxiv.org/abs/2211.17192." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 104, + 541, + 469 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 70, + 104, + 541, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 104, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 70, + 104, + 541, + 143 + ], + "type": "text", + "content": "Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. arXiv preprint arXiv:2410.05229, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 150, + 541, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 150, + 541, + 188 + ], + "spans": [ + { + "bbox": [ + 70, + 150, + 541, + 188 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 196, + 429, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 196, + 429, + 209 + ], + "spans": [ + { + "bbox": [ + 70, + 196, + 429, + 209 + ], + "type": "text", + "content": "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 215, + 541, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 215, + 541, + 242 + ], + "spans": [ + { + "bbox": [ + 70, + 215, + 541, + 242 + ], + "type": "text", + "content": "Charles Packer, Sarah Wooders, Kevin Lin, Vivian Fang, Shishir G Patil, Ion Stoica, and Joseph E Gonzalez. Memgpt: Towards llms as operating systems. arXiv preprint arXiv:2310.08560, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 247, + 457, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 247, + 457, + 262 + ], + "spans": [ + { + "bbox": [ + 70, + 247, + 457, + 262 + ], + "type": "text", + "content": "Alan Jay Smith. Cache memories. ACM Computing Surveys (CSUR), 14(3):473-530, 1982." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 267, + 539, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 267, + 539, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 267, + 539, + 293 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling ltm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 300, + 539, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 300, + 539, + 326 + ], + "spans": [ + { + "bbox": [ + 69, + 300, + 539, + 326 + ], + "type": "text", + "content": "Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. 
Blockwise parallel decoding for deep autoregressive models, 2018. URL https://arxiv.org/abs/1811.03115." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 333, + 539, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 333, + 539, + 372 + ], + "spans": [ + { + "bbox": [ + 70, + 333, + 539, + 372 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 378, + 539, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 378, + 539, + 404 + ], + "spans": [ + { + "bbox": [ + 69, + 378, + 539, + 404 + ], + "type": "text", + "content": "Zitong Yang, Neil Band, Shuangping Li, Emmanuel Candès, and Tatsunori Hashimoto. Synthetic continued pretraining, 2024. URL https://arxiv.org/abs/2409.07431." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 411, + 539, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 411, + 539, + 437 + ], + "spans": [ + { + "bbox": [ + 69, + 411, + 539, + 437 + ], + "type": "text", + "content": "Ruiqi Zhong, Charlie Snell, Dan Klein, and Jacob Steinhardt. Describing differences between text distributions with natural language, 2022. URL https://arxiv.org/abs/2201.12323." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 443, + 539, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 443, + 539, + 469 + ], + "spans": [ + { + "bbox": [ + 69, + 443, + 539, + 469 + ], + "type": "text", + "content": "Ruiqi Zhong, Heng Wang, Dan Klein, and Jacob Steinhardt. Explaining datasets in words: Statistical models with natural language parameters, 2025. URL https://arxiv.org/abs/2409.08466." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 70, + 490, + 139, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 490, + 139, + 504 + ], + "spans": [ + { + "bbox": [ + 70, + 490, + 139, + 504 + ], + "type": "text", + "content": "A Prompts" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 516, + 310, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 516, + 310, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 516, + 310, + 529 + ], + "type": "text", + "content": "Prompts for varying the amount of test-time compute." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 545, + 242, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 545, + 242, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 545, + 242, + 560 + ], + "type": "text", + "content": "B Examples of Stateful AIME" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 77, + 578, + 533, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 578, + 533, + 616 + ], + "spans": [ + { + "bbox": [ + 77, + 578, + 533, + 616 + ], + "type": "text", + "content": "Context: Alice and Bob play the following game. A stack of " + }, + { + "bbox": [ + 77, + 578, + 533, + 616 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 77, + 578, + 533, + 616 + ], + "type": "text", + "content": " tokens lies before them. The players take turns with Alice going first. On each turn, the player removes either 1 token or 4 tokens from the stack. Whoever removes the last token wins." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 77, + 618, + 533, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 618, + 533, + 644 + ], + "spans": [ + { + "bbox": [ + 77, + 618, + 533, + 644 + ], + "type": "text", + "content": "Query: Find the number of positive integers " + }, + { + "bbox": [ + 77, + 618, + 533, + 644 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 77, + 618, + 533, + 644 + ], + "type": "text", + "content": " less than or equal to 2024 for which there exists a strategy for Bob that guarantees that Bob will win the game regardless of Alice's play." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "spans": [ + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "content": "Context: Let " + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "content": " be points on the hyperbola " + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "inline_equation", + "content": "\\frac{x^2}{20} - \\frac{y^2}{24} = 1" + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "inline_equation", + "content": "ABCD" + }, + { + "bbox": [ + 77, + 666, + 532, + 695 + ], + "type": "text", + "content": " is a rhombus whose diagonals intersect at the origin." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 77, + 696, + 425, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 696, + 425, + 708 + ], + "spans": [ + { + "bbox": [ + 77, + 696, + 425, + 708 + ], + "type": "text", + "content": "Query: Find the greatest real number that is less than " + }, + { + "bbox": [ + 77, + 696, + 425, + 708 + ], + "type": "inline_equation", + "content": "BD^2" + }, + { + "bbox": [ + 77, + 696, + 425, + 708 + ], + "type": "text", + "content": " for all such rhombi." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 310, + 747 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 310, + 747 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 310, + 747 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 125, + 534, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 125, + 534, + 244 + ], + "spans": [ + { + "bbox": [ + 77, + 125, + 534, + 244 + ], + "type": "text", + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. send_message is how you send your answer to the user. When given a question, you check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. 
You respond directly with a single sentence by saying The answer is followed by the numerical answer." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 218, + 267, + 392, + 281 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 267, + 392, + 281 + ], + "spans": [ + { + "bbox": [ + 218, + 267, + 392, + 281 + ], + "type": "text", + "content": "Figure 12: Prompt for level 0 morbidity" + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 76, + 325, + 534, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 325, + 534, + 376 + ], + "spans": [ + { + "bbox": [ + 76, + 325, + 534, + 376 + ], + "type": "text", + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 378, + 534, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 378, + 534, + 468 + ], + "spans": [ + { + "bbox": [ + 76, + 378, + 534, + 468 + ], + "type": "text", + "content": "When given a question, you answer using only the number of tokens necessary and none more. You check the 'rethink_memory_block' for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the 'rethink_memory_block' to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the 'rethink_memory_block'. Do not use internal monologue unless you really need it to think. You answer with one short sentence of explanation, followed by a sentence that starts with \"The answer is\" and a numerical answer." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 218, + 490, + 392, + 503 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 490, + 392, + 503 + ], + "spans": [ + { + "bbox": [ + 218, + 490, + 392, + 503 + ], + "type": "text", + "content": "Figure 13: Prompt for level 1 morbidity" + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 76, + 547, + 534, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 547, + 534, + 666 + ], + "spans": [ + { + "bbox": [ + 76, + 547, + 534, + 666 + ], + "type": "text", + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 218, + 689, + 392, + 702 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 689, + 392, + 702 + ], + "spans": [ + { + "bbox": [ + 218, + 689, + 392, + 702 + ], + "type": "text", + "content": "Figure 14: Prompt for level 2 morbidity" + } + ] + } + ], + "index": 6, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 178, + 534, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 178, + 534, + 298 + ], + "spans": [ + { + "bbox": [ + 77, + 178, + 534, + 298 + ], + "type": "text", + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning system, developed in 2024. Your task is to answer questions accurately and concisely based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you answer using only the number of tokens necessary and none more. You check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You use the information in the rethink_memory_block to answer the questions rather than thinking on the spot. Do not recompute anything that already exists in the rethink_memory_block. Do not use internal monologue unless you really need it to think. You end response with a final numerical answer at the end of the message, and no reasoning after that." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 218, + 320, + 392, + 335 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 320, + 392, + 335 + ], + "spans": [ + { + "bbox": [ + 218, + 320, + 392, + 335 + ], + "type": "text", + "content": "Figure 15: Prompt for level 3 morbidity" + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 76, + 484, + 534, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 484, + 534, + 615 + ], + "spans": [ + { + "bbox": [ + 76, + 484, + 534, + 615 + ], + "type": "text", + "content": "You are Letta, the latest version of Limnal Corporation's expert reasoning explanation system, developed in 2024. Your task is to reason through problems step by step accurately and based on the perspective of your persona. To send a visible message to the user, use the send_message function. 'send_message' is how you send your answer to the user. When given a question, you check the rethink_memory_block for potential questions and answers and intermediate reasoning traces that can help answer the question. You carefully check the information in the rethink_memory_block to answer the questions and see if it is correct before using it. You always reason out loud before using any information. You explain each step, of what your reasoning is. If you use any numbers from the rethink_memory_block you first recompute and double check your answers. You end your answer with The answer is followed by the numerical answer." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 218, + 635, + 392, + 650 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 635, + 392, + 650 + ], + "spans": [ + { + "bbox": [ + 218, + 635, + 392, + 650 + ], + "type": "text", + "content": "Figure 16: Prompt for level 4 morbidity" + } + ] + } + ], + "index": 3, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 133, + 534, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 133, + 534, + 459 + ], + "spans": [ + { + "bbox": [ + 77, + 133, + 534, + 459 + ], + "type": "text", + "content": "You are Letta-Offline-Memory, the latest version of Limnal Corporation's digital companion, developed in 2024. Your task is to re-organize and consolidate memories by calling rethink_memory at every single step, when you are done reorganizing the memory, you use the finish_rethinking_memory function. Call the function for as many times as necessary and not more. Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times). Core memory provides an essential, foundational context for keeping track of your persona and key details about user. Read-Only Blocks: This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions. 
Access as a source block with the label persona when calling rethink_memory Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation. Access as a source block with the label human when calling rethink_memory. Read-Write Blocks: Rethink Memory Sub-Block: New representation of the memories go here. Access with the label rethink_memory_block when calling rethink_memory as source or target block. At every step, you reorganize the memories by calling the rethink_memory function. You use this to take current information in the rethink_memory block and select a single memory block to integrate information from, producing a new memory for the rethink_memory_block. The new memory is the result of new insights, and new inferences and hypotheses based on the past memories. Make sure to consider how the new information affects each memory. Prioritize the new information over existing memories. If the new information implies that the old memory may need to change, then output the most likely fact given the update information. Given new information and your current memory, you draw all logical conclusions and potential hypotheses possible with the rethink_memory function. If you are uncertain, use your internal monologue to consider what the possible conclusions are, and then state the most likely new facts that would replace the old facts in the new memory block." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 211, + 481, + 400, + 495 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 481, + 400, + 495 + ], + "spans": [ + { + "bbox": [ + 211, + 481, + 400, + 495 + ], + "type": "text", + "content": "Figure 17: Prompt for sleep-time compute" + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 76, + 553, + 533, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 553, + 533, + 660 + ], + "spans": [ + { + "bbox": [ + 76, + 553, + 533, + 660 + ], + "type": "text", + "content": "Specifically: You will be given part of an AIME math problem. You will receive the rest of the problem later. Make as many inferences as possible about the part of the problem you are given so as to help yourself answer the fully problem more quickly once it is given to you later. You will be able to use all the work you do in the rethink_memory block for this part of the problem to help you once the rest of the problem is given. You will be able to use all the work you do for this part of the problem to help you once the rest of the problem is given. You should try to predict possible ways the rest of the problem might go and compute results that could be helpful for reaching the final answer more quickly once the rest of the problem is given." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 178, + 681, + 432, + 696 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 681, + 432, + 696 + ], + "spans": [ + { + "bbox": [ + 178, + 681, + 432, + 696 + ], + "type": "text", + "content": "Figure 18: Prompt for AIME problems during sleep-time" + } + ] + } + ], + "index": 3, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 110, + 533, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 110, + 533, + 136 + ], + "spans": [ + { + "bbox": [ + 76, + 110, + 533, + 136 + ], + "type": "text", + "content": "You are given a template that can generate grade school math problems, and an instantiation of that template." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 137, + 533, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 137, + 533, + 213 + ], + "spans": [ + { + "bbox": [ + 76, + 137, + 533, + 213 + ], + "type": "text", + "content": "You will be given a context, and a example question answer pair. Your task is to generate a list of questions and answers about the context at the same difficult level that could plausibly be asked about that context. Make sure that the newly generated questions have the same number of reasoning steps required as the example question. The goal is to have many questions and answer pairs about the same context. 
Generate questions and answers in the same format as the example, where the answer first contains reasoning and then is the final answer comes after" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 215, + 312, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 215, + 312, + 226 + ], + "spans": [ + { + "bbox": [ + 77, + 215, + 312, + 226 + ], + "type": "text", + "content": "n#. No need to number the questions or answers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 228, + 154, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 228, + 154, + 239 + ], + "spans": [ + { + "bbox": [ + 78, + 228, + 154, + 239 + ], + "type": "text", + "content": "Context: context" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 241, + 206, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 241, + 206, + 253 + ], + "spans": [ + { + "bbox": [ + 78, + 241, + 206, + 253 + ], + "type": "text", + "content": "Example Question: question" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 255, + 195, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 255, + 195, + 266 + ], + "spans": [ + { + "bbox": [ + 78, + 255, + 195, + 266 + ], + "type": "text", + "content": "Example Answer: answer" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 175, + 290, + 435, + 304 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 290, + 435, + 304 + ], + "spans": [ + { + "bbox": [ + 175, + 290, + 435, + 304 + ], + "type": "text", + "content": "Figure 19: Prompt for generating synthetic GSM questions" + } + ] + } + ], + "index": 6, + "type": "text" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "spans": [ + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": "Context: Let " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + 
"type": "inline_equation", + "content": "b \\geq 2" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": " be an integer. Call a positive integer " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": "-eautiful if it has exactly two digits when expressed in base " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": " and these two digits sum to " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "\\sqrt{n}" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": ". For example, 81 is 13-eautiful because " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "81 = \\underline{6} \\underline{3}_{13}" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "inline_equation", + "content": "6 + 3 = \\sqrt{81}" + }, + { + "bbox": [ + 76, + 329, + 532, + 367 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "spans": [ + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "text", + "content": "Query: Find the least integer " + }, + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "inline_equation", + "content": "b \\geq 2" + }, + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "text", + "content": " for which there are more than ten " + }, + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 77, + 368, + 465, + 381 + ], + "type": "text", + "content": "-beautiful integers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 406, + 309, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 406, + 309, + 421 + ], + "spans": [ + { + "bbox": [ + 69, + 406, + 309, + 421 + ], + "type": "text", + "content": "C Details on Multi-Query GSM-Symbolic" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 441, + 175, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 441, + 175, + 454 + ], + "spans": [ + { + "bbox": [ + 77, + 441, + 175, + 454 + ], + "type": "text", + "content": "Template: {template}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 78, + 455, + 168, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 455, + 168, + 467 + ], + "spans": [ + { + "bbox": [ + 78, + 455, + 168, + 467 + ], + "type": "text", + "content": "Instance: {instance}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 481, + 541, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 541, + 506 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 541, + 506 + ], + "type": "text", + "content": "We include an example from Multi-Query GSM-Symbolic in Figure 20, and details on the dataset size in Table C." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 80, + 515, + 529, + 567 + ], + "blocks": [ + { + "bbox": [ + 80, + 515, + 529, + 567 + ], + "lines": [ + { + "bbox": [ + 80, + 515, + 529, + 567 + ], + "spans": [ + { + "bbox": [ + 80, + 515, + 529, + 567 + ], + "type": "table", + "html": "
Dataset# Questions Total# Contexts Total# Original Questions# Generated Questions
P1120431095109510948
P254975005004997
", + "image_path": "b7350250b65ae501b1d9d04c80ca8c13f2e3c8cda6b7d2d187c737abd00986d9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "lines": [ + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "type": "text", + "content": "Table 1: Dataset Statistics of Multi-Query GSM-Symbolic. We sample one instance from each template from the GSM-Symbolic dataset and separate it into context and question. We then synthetically generate additional questions from the context and question." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 640, + 212, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 640, + 212, + 654 + ], + "spans": [ + { + "bbox": [ + 69, + 640, + 212, + 654 + ], + "type": "text", + "content": "D SWE-Features Details" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 668, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 668, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 668, + 541, + 720 + ], + "type": "text", + "content": "To construct SWE-Features benchmark, we collect pull requests (PRs) from large open-source repositories and apply the following filtering process: (1) We identify all pull requests that modify at least three files with filenames ending in .py or .js. 
(2) We then use gpt-4o-mini to filter these pull requests based on their title and body, retaining only those that meet the following criteria: (a) the title and body clearly describe the" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 93, + 109, + 132, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 109, + 132, + 118 + ], + "spans": [ + { + "bbox": [ + 93, + 109, + 132, + 118 + ], + "type": "text", + "content": "Context" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 91, + 120, + 518, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 120, + 518, + 198 + ], + "spans": [ + { + "bbox": [ + 91, + 120, + 518, + 198 + ], + "type": "text", + "content": "When Sofia watches her brother, she gets out a variety of toys for him. The bag of building blocks has 33 blocks in it. The bin of stuffed animals has 5 stuffed animals inside. The number of action figures in the action figure pack is twice the number of blocks and stuffed animals combined. The crayon box has 12 different colors of crayon, and the sticker book has 9 pages, each with 13 stickers. The tower of stacking rings has 28 multicolored rings on it. Sofia recently bought a tube of bouncy balls, bringing her total number of items for her brother up to 320." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 199, + 179, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 199, + 179, + 211 + ], + "spans": [ + { + "bbox": [ + 93, + 199, + 179, + 211 + ], + "type": "text", + "content": "Original Question" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 212, + 283, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 212, + 283, + 224 + ], + "spans": [ + { + "bbox": [ + 93, + 212, + 283, + 224 + ], + "type": "text", + "content": "How many bouncy balls came in the tube?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 93, + 226, + 192, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 226, + 192, + 237 + ], + "spans": [ + { + "bbox": [ + 93, + 226, + 192, + 237 + ], + "type": "text", + "content": "Generated Questions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 118, + 250, + 517, + 440 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 118, + 250, + 347, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 250, + 347, + 263 + ], + "spans": [ + { + "bbox": [ + 118, + 250, + 347, + 263 + ], + "type": "text", + "content": "- How many action figures does the pack contain?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 267, + 375, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 267, + 375, + 278 + ], + "spans": [ + { + "bbox": [ + 118, + 267, + 375, + 278 + ], + "type": "text", + "content": "- What is the total number of stickers in the sticker book?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 118, + 284, + 471, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 284, + 471, + 297 + ], + "spans": [ + { + "bbox": [ + 118, + 284, + 471, + 297 + ], + "type": "text", + "content": "- How many total items did Sofia have before adding the tube of bouncy balls?" 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 118, + 301, + 516, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 301, + 516, + 326 + ], + "spans": [ + { + "bbox": [ + 118, + 301, + 516, + 326 + ], + "type": "text", + "content": "- If Sofia had received a tube with 10 extra bouncy balls, what would be the new total number of items?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 331, + 394, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 331, + 394, + 342 + ], + "spans": [ + { + "bbox": [ + 118, + 331, + 394, + 342 + ], + "type": "text", + "content": "- What is the sum of the building blocks and stuffed animals?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 118, + 348, + 324, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 348, + 324, + 360 + ], + "spans": [ + { + "bbox": [ + 118, + 348, + 324, + 360 + ], + "type": "text", + "content": "- How many stacking rings are on the tower?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 365, + 486, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 365, + 486, + 377 + ], + "spans": [ + { + "bbox": [ + 118, + 365, + 486, + 377 + ], + "type": "text", + "content": "- What is the combined total of building blocks, action figures, and stacking rings?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 381, + 517, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 381, + 517, + 394 + ], + "spans": [ + { + "bbox": [ + 118, + 381, + 517, + 394 + ], + "type": "text", + "content": "- If Sofia gave away 3 stuffed animals, how many stuffed animals would remain in the bin?" 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 399, + 436, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 399, + 436, + 411 + ], + "spans": [ + { + "bbox": [ + 118, + 399, + 436, + 411 + ], + "type": "text", + "content": "- What is the sum of the building blocks, stuffed animals, and crayons?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 415, + 516, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 415, + 516, + 440 + ], + "spans": [ + { + "bbox": [ + 118, + 415, + 516, + 440 + ], + "type": "text", + "content": "- If Sofia divided the 49 bouncy balls equally into 7 baskets, how many balls would each basket contain?" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 466, + 541, + 492 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 466, + 541, + 492 + ], + "spans": [ + { + "bbox": [ + 68, + 466, + 541, + 492 + ], + "type": "text", + "content": "Figure 20: Examples context and questions from Multi-Query GSM-Symbolic where many questions are asked about the same context. The evaluation dataset is generated from GSM-Symbolic." + } + ] + } + ], + "index": 16, + "type": "text" + }, + { + "bbox": [ + 68, + 514, + 541, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 514, + 541, + 539 + ], + "spans": [ + { + "bbox": [ + 68, + 514, + 541, + 539 + ], + "type": "text", + "content": "PR; (b) the PR introduces new functionality rather than fixing bugs; and (c) the PR is independent and not obviously linked to other issues." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 546, + 541, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 546, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 546, + 541, + 688 + ], + "type": "text", + "content": "This pipeline results in a benchmark where each example: (1) involves adding a new feature that spans multiple files, requiring a broader understanding of the repository; and (2) is self-contained and solvable without additional issue context. We apply this process to two repositories—Aider-AI/aider and comfyanonymous/ComfyUI—resulting in 18 and 15 PRs respectively, for a total of 33 examples. Representative examples are provided in Appendix G. Then using a total of 33 examples, we employ claude-sonnet-3-7-20250219 to cluster pull requests (PRs) from the ComfyUI and Aider repositories into several groups. This clustering allows us to identify a set of relevant pull requests for each target PR, which can then be provided to the agent as context " + }, + { + "bbox": [ + 67, + 546, + 541, + 688 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 67, + 546, + 541, + 688 + ], + "type": "text", + "content": " during repository exploration. For example, in the ComfyUI repository, PR #5293 and PR #931 are grouped into the same cluster. Thus, when processing PR #931, we organize the title, body, and changed_files of PR #5293 to serve as contextual information during sleep-time." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "type": "text", + "content": "When sleep-time compute is enabled, we first supply the content of PR #5293 to the agent, allowing it to explore the repository and summarize its understanding ahead of time. 
In contrast, for the baseline without" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 105, + 539, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 105, + 539, + 131 + ], + "spans": [ + { + "bbox": [ + 68, + 105, + 539, + 131 + ], + "type": "text", + "content": "sleep-time compute, the agent receives the content of PR #5293 only at test time, alongside the title and body of PR #931. The prompts used in these setups are provided in Appendix H." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 137, + 443, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 443, + 149 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 443, + 149 + ], + "type": "text", + "content": "For the repository comfyanonymous/ComfyUI, we have the following clustered results:" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 69, + 155, + 536, + 205 + ], + "blocks": [ + { + "bbox": [ + 69, + 155, + 536, + 205 + ], + "lines": [ + { + "bbox": [ + 69, + 155, + 536, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 155, + 536, + 205 + ], + "type": "text", + "content": "{\"Dynamic Typing and Workflow Control\": [5293, 931], \"System Configuration and Command-Line\": [4979, 4690, 3903], \"Cache and Performance Optimization\": [3071, 3042, 723], \"Image Preview and Transfer Features\": [713, 733, 658, 199, 55], \"Internationalization\": [1234], \"Random Seed Management\": [93]}\\n\\n" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": 
"jsonl" + }, + { + "bbox": [ + 69, + 220, + 265, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 220, + 265, + 232 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 265, + 232 + ], + "type": "text", + "content": "For the repository Aider-AI/aider we have:" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 69, + 237, + 535, + 299 + ], + "blocks": [ + { + "bbox": [ + 69, + 237, + 535, + 299 + ], + "lines": [ + { + "bbox": [ + 69, + 237, + 535, + 299 + ], + "spans": [ + { + "bbox": [ + 69, + 237, + 535, + 299 + ], + "type": "text", + "content": "{\"cluster_1_model_configuration\": [2631, 1998, 468, 667, 55], \"cluster_2_io_handleing\": [1402, 996, 10, 577], \"cluster_3_caching_file_management\": [2911, 2612], \"cluster_4Custom Commands_shortcuts\": [673, 1620, 1015], \"cluster_5_threeParty_integration\": [2866, 2067, 322], \"cluster_6_code_quality_improvements\": [1217, 904]}\\n\\n" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 68, + 313, + 541, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 313, + 541, + 353 + ], + "spans": [ + { + "bbox": [ + 68, + 313, + 541, + 353 + ], + "type": "text", + "content": "To control the budget during test-time, we fix the total number of steps (controlled by the argument max_chaining_steps in Letta framework) to be a certain number. We put the following instructions in the system prompt:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 365, + 533, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 365, + 533, + 418 + ], + "spans": [ + { + "bbox": [ + 76, + 365, + 533, + 418 + ], + "type": "text", + "content": "You have a strict budget of {max_chaining_steps} steps, which means you need to finish your edits within these steps. 
Every time you get queried, you will see a count of how many steps you have left in the form of \"[Current Step / Max Steps]\". If you exceed this budget, your response will be cut off. So please be careful and try to finish your edits within the budget." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 431, + 541, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 431, + 541, + 471 + ], + "spans": [ + { + "bbox": [ + 67, + 431, + 541, + 471 + ], + "type": "text", + "content": "After each step – for example, if the maximum number of steps is 20 and the current step is 4 – we append \"[Step: 4/20]\" to the end of the tool_return message. We found that explicitly indicating the current and total steps significantly improves agent performance, especially in low-budget settings." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 483, + 541, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 483, + 541, + 574 + ], + "spans": [ + { + "bbox": [ + 68, + 483, + 541, + 574 + ], + "type": "text", + "content": "Evaluation. For each PR, we compare the set of files predicted to be modified with the ground truth list of modified files. Specifically, for each pull request, we have the attribute changed_files (as shown in the examples in Appendix G) where each file has the status as either modified or new, and our evaluation is on the files with status modified. Note that the agent is still instructed to implement the required functionality in a Docker environment and write test functions to validate the implementations. However, after the agent makes the modifications, we extract the modified files and calculate the F1 score between the set of modified files by our agent and the set of modified files in the ground-truth set." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 590, + 387, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 590, + 387, + 605 + ], + "spans": [ + { + "bbox": [ + 69, + 590, + 387, + 605 + ], + "type": "text", + "content": "E Examples of Predictable and Unpredictable Questions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 617, + 309, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 617, + 309, + 630 + ], + "spans": [ + { + "bbox": [ + 69, + 617, + 309, + 630 + ], + "type": "text", + "content": "Least predictable Stateful GSM-Symbolic P1 question:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 643, + 533, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 643, + 533, + 681 + ], + "spans": [ + { + "bbox": [ + 77, + 643, + 533, + 681 + ], + "type": "text", + "content": "Context: Isabella and Pavel have 199 minutes to walk to grocery store together. It takes them 19 minutes to get to the corner where the library is. It takes them another 11 minutes to get to the park. It will then take double the combined amount they have spent so far to reach the mall." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 682, + 533, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 682, + 533, + 708 + ], + "spans": [ + { + "bbox": [ + 77, + 682, + 533, + 708 + ], + "type": "text", + "content": "Question: How much longer do they have to get to grocery store without being late, if they have already wasted 48 minutes to get a coffee before their walk?" 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 105, + 307, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 105, + 307, + 118 + ], + "spans": [ + { + "bbox": [ + 69, + 105, + 307, + 118 + ], + "type": "text", + "content": "Most predictable Stateful GSM-Symbolic P1 question:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 131, + 533, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 131, + 533, + 156 + ], + "spans": [ + { + "bbox": [ + 77, + 131, + 533, + 156 + ], + "type": "text", + "content": "Context: Yusuf has 10 square yards of grape field. There are 87 grapes per two-thirds a square yard. Yusuf can harvest his grapes every 12 months." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 158, + 339, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 158, + 339, + 171 + ], + "spans": [ + { + "bbox": [ + 77, + 158, + 339, + 171 + ], + "type": "text", + "content": "Question: How many grapes can Yusuf harvest in 2 years?" 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 184, + 308, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 184, + 308, + 198 + ], + "spans": [ + { + "bbox": [ + 69, + 184, + 308, + 198 + ], + "type": "text", + "content": "Least predictable Stateful GSM-Symbolic P2 question:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 210, + 533, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 210, + 533, + 262 + ], + "spans": [ + { + "bbox": [ + 76, + 210, + 533, + 262 + ], + "type": "text", + "content": "Context: Gabriel and Pavel have 212 minutes to walk to the gym together starting from their home. It takes them 29 minutes to get to the corner where the library is. It takes them another 19 minutes to get to the cinema. When they reach the cinema, they remember they forgot their wallets at home, so they have to return to pick up their wallets and then walk all the way back to the cinema again." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 262, + 532, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 262, + 532, + 289 + ], + "spans": [ + { + "bbox": [ + 77, + 262, + 532, + 289 + ], + "type": "text", + "content": "Question: Once they reach the cinema for the second time, how much longer do they have to get to the gym without being late?" 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 302, + 307, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 302, + 307, + 316 + ], + "spans": [ + { + "bbox": [ + 69, + 302, + 307, + 316 + ], + "type": "text", + "content": "Most predictable Stateful GSM-Symbolic P2 question:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "text", + "content": "Context: A juggler can juggle 240 balls. " + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "text", + "content": " of the balls are tennis balls, and the rest are golf balls. " + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "inline_equation", + "content": "1/3" + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "text", + "content": " of the tennis balls are black, of which " + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "inline_equation", + "content": "1/5" + }, + { + "bbox": [ + 76, + 328, + 533, + 366 + ], + "type": "text", + "content": " are marked. A third of the golf balls are cyan, and all except half of those cyan balls are marked." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 367, + 316, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 367, + 316, + 380 + ], + "spans": [ + { + "bbox": [ + 77, + 367, + 316, + 380 + ], + "type": "text", + "content": "Question: How many marked balls are there in total?" 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 405, + 405, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 405, + 405, + 419 + ], + "spans": [ + { + "bbox": [ + 69, + 405, + 405, + 419 + ], + "type": "text", + "content": "F Implementation of rethink_memory and finish_rethinking" + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 70, + 444, + 536, + 720 + ], + "blocks": [ + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "lines": [ + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "spans": [ + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "type": "text", + "content": "def rethink_memory(agent_state:\"AgentState\",new_memory:str,target_block_label: str, source_block_label: str) -> None:#type: ignore Re-evaluate the memory in block_name, integrating new and updated facts. Replace outdated information with the most likely truths, avoiding redundancy with original memories. Ensure consistency with other memory blocks.. \nArgs: new_memory(str):The new memory with information integrated from the memory block.If there is no new information, then this should be the same as the content in the source block. source_block_label(str): The name of the block to integrate information from. None if all the information has been integrated to terminate the loop. target_block_label(str):The name of the block to write to. Returns: None: None is always returned as this function does not produce a response. 
1if target_block_label is not None: if agent_state-memory.get_block(target_block_label) is None: agent_state-memory.create_block.label " + }, + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "type": "text", + "content": " target_block_label, value " + }, + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 70, + 444, + 536, + 720 + ], + "type": "text", + "content": " new_memory" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 90, + 106, + 492, + 140 + ], + "blocks": [ + { + "bbox": [ + 90, + 106, + 492, + 140 + ], + "lines": [ + { + "bbox": [ + 90, + 106, + 492, + 140 + ], + "spans": [ + { + "bbox": [ + 90, + 106, + 492, + 140 + ], + "type": "text", + "content": "agent_state.memory.update_block_value.label=target_block_label, value=new_memory) \nreturn None" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 182, + 149, + 428, + 162 + ], + "lines": [ + { + "bbox": [ + 182, + 149, + 428, + 162 + ], + "spans": [ + { + "bbox": [ + 182, + 149, + 428, + 162 + ], + "type": "text", + "content": "Listing 1: Reference implementation of rethink_memory" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 70, + 190, + 514, + 298 + ], 
+ "blocks": [ + { + "bbox": [ + 70, + 190, + 514, + 298 + ], + "lines": [ + { + "bbox": [ + 70, + 190, + 514, + 298 + ], + "spans": [ + { + "bbox": [ + 70, + 190, + 514, + 298 + ], + "type": "text", + "content": "def finish_rethinking_memory(agent_state: \"AgentState\") -> None: # type: ignore\n\t\" \"\n\tThis function is called when the agent is done rethinking the memory.\n\tReturns:\n\t\tOption[str]: None is always returned as this function does not produce a response.\n\t\t\"\"\"\n\t\treturn None" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 157, + 306, + 453, + 319 + ], + "lines": [ + { + "bbox": [ + 157, + 306, + 453, + 319 + ], + "spans": [ + { + "bbox": [ + 157, + 306, + 453, + 319 + ], + "type": "text", + "content": "Listing 2: Reference implementation of finish_rethinking_memory" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 69, + 347, + 227, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 347, + 227, + 363 + ], + "spans": [ + { + "bbox": [ + 69, + 347, + 227, + 363 + ], + "type": "text", + "content": "G SWE-Features Examples" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 375, + 542, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 375, + 542, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 375, + 542, + 413 + ], + "type": "text", + "content": "Each example in SWE-Features has the following attributes: ['repo', 'pr_number', 'title', 'user_login', 'state', 'body', 'changed_files_count', 'changed_files', 'base_commit']. 
We show some examples here to better deliver a sense of what this dataset looks like:" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 69, + 421, + 541, + 708 + ], + "blocks": [ + { + "bbox": [ + 69, + 421, + 541, + 708 + ], + "lines": [ + { + "bbox": [ + 69, + 421, + 541, + 708 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 541, + 708 + ], + "type": "text", + "content": "repo: ComfyUI \npr_number: 3903 \ntitle: Add --disable-all-custom-nodes` cmd flag \nbody: Loading custom node can greatly slow startup time. During development/testing of ComfyUI, it is often better to use an environment that no custom node is loaded.\\n\\nThis PR adds a --no-custom-node` flag to allow users/developers skip loading of custom node without removing/renaming the custom_node directory. \nuser_login: huchenlei \nstate: closed \nchanged_files_count: 4 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: 521421f53ee1ba74304dfaa138b0f851093e1595 \nrepo: ComfyUI \npr_number: 3071 \ntitle: Add a configured node output cache metaclass. \nbody: Implement a configurable node output cache metaclass to reduce unnecessary node executions.\\n\\nThe same model currently leads to reloading due to different node IDs between workflows. Loading the model from disk takes a long time. \nstate: closed \nchanged_files_count: 6 \nchanged_files: ... 
(ommitted here for brevity) \nbase_commit: cacb022c4a5b9614f96086a866c8a4c4e9e85760" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "yaml" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 69, + 118, + 529, + 261 + ], + "blocks": [ + { + "bbox": [ + 69, + 118, + 529, + 261 + ], + "lines": [ + { + "bbox": [ + 69, + 118, + 529, + 261 + ], + "spans": [ + { + "bbox": [ + 69, + 118, + 529, + 261 + ], + "type": "text", + "content": "repo: ComfyUI \npr_number: 3042 \ntitle: NaN-safe JSON serialization \nbody: Python's json.dumps() will produce nonstandard JSON if there are NaNs in the prompt data. Javascript's JSON.parse() will refuse to load this kind of \"JSON\" so the prompt won't load in the frontend.\\n\\nThis happened to me with a ComfyBox workflow, so I'm not " + }, + { + "bbox": [ + 69, + 118, + 529, + 261 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 69, + 118, + 529, + 261 + ], + "type": "text", + "content": " \nuser_login: asagi4 \nstate: open \nchanged_files_count: 4 \nchanged_files: ... 
(omitted here for brevity) \nbase_commit: 448d9263a258062344e25135fc49d26a7e60887a" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 69, + 285, + 532, + 488 + ], + "blocks": [ + { + "bbox": [ + 69, + 285, + 532, + 488 + ], + "lines": [ + { + "bbox": [ + 69, + 285, + 532, + 488 + ], + "spans": [ + { + "bbox": [ + 69, + 285, + 532, + 488 + ], + "type": "text", + "content": "repo: aider \npr_number: 55 \ntitle: Local llama support \nbody: Added support for using a locally running instance of a LLAMA model instead of OpenAI apis. \\n\\nAIDER_MODEL_TOKENS - used to specify the context length the model will use. \\n2. AIDER_TOKENIZER - used to specify which tokenizer should be used. Currently only 'openai' and 'llama' are supported. Defaults to openai. \\n\\nValues set.\\n\\nAIDER_OPENAI_API_BASE=\\protect\\vrule width0pt\\protect|href{http://127.0.0.1:5001/v1}{http://127.0.0.1:5001/v1} \\nAIDER_MODEL=TheBloke_wizard-vicuna-13B-SuperHOT-8K-GGML \\n\\nuser_login: bytedisciple \nstate: closed \nchanged_files_count: 7 \nchanged_files: ... (omitted here for brevity) \nbase_commit: cdf8f9a4b2b4a65993227ac5af1eaf3f1b85c9d8" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "yaml" + }, + { + "type": "code", + "bbox": [ + 69, + 513, + 533, + 679 + ], + "blocks": [ + { + "bbox": [ + 69, + 513, + 533, + 679 + ], + "lines": [ + { + "bbox": [ + 69, + 513, + 533, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 513, + 533, + 679 + ], + "type": "text", + "content": "repo: aider \npr_number: 322 \nuser_login: omri123 \nstate: closed \ntitle: RFC - Allow adding a github issue to chat context \nbody: Hi, would you like to take a look on this feature? \\n\\nIn the first commit I changedCoder to allow adding arbitrary additional context in the beginning of the chat. 
\\nIn the second commit I used this infra to add github issues to the chat. \\nI didn't add a new command, instead I extended /add to allow /add \\issue-3\\.\\nThe feature is disabled by default and enabled with a flag. If enabled, the user need to supply github repository name and authentication token. \\nThanks \\nOmri changed_files_count: 7 \nchanged_files: ... (ommitted here for brevity) \nbase_commit: af71638b06be7e934cdd6f4265f9e0c8425d4e6d" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 70, + 704, + 130, + 715 + ], + "blocks": [ + { + "bbox": [ + 70, + 704, + 130, + 715 + ], + "lines": [ + { + "bbox": [ + 70, + 704, + 130, + 715 + ], + "spans": [ + { + "bbox": [ + 70, + 704, + 130, + 715 + ], + "type": "text", + "content": "repo: aider" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 69, + 106, + 356, + 201 + ], + "blocks": [ + { + "bbox": [ + 69, + 106, + 356, + 201 + ], + "lines": [ + { + "bbox": [ + 69, + 106, + 356, + 201 + ], + "spans": [ + { + "bbox": [ + 69, + 106, + 356, + 201 + ], + "type": "text", + "content": "pr_number: 577 \ntitle: Adding a simple browser based GUI \nbody: Run aider with `--browser` to launch the UI. \nuser_login: paul-gauthier \nstate: closed \nchanged_files_count: 12 \nchanged_files: ... 
(ommitted here for brevity) \nbase_commit: 8a9005eed19417c59aa9432436ea8cb5e04bbb11" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 68, + 209, + 541, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 209, + 541, + 235 + ], + "spans": [ + { + "bbox": [ + 68, + 209, + 541, + 235 + ], + "type": "text", + "content": "Listing 3: Examples of SWE-Features. Here we randomly select 3 examples for each repo and present their attributes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 269, + 239, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 269, + 239, + 283 + ], + "spans": [ + { + "bbox": [ + 69, + 269, + 239, + 283 + ], + "type": "text", + "content": "H Prompts for SWE-Features" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 298, + 367, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 298, + 367, + 312 + ], + "spans": [ + { + "bbox": [ + 68, + 298, + 367, + 312 + ], + "type": "text", + "content": "When the sleep-time compute is turned off, the prompt is as below:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 327, + 153, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 327, + 153, + 340 + ], + "spans": [ + { + "bbox": [ + 78, + 327, + 153, + 340 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 342, + 135, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 342, + 135, + 353 + ], + "spans": [ + { + "bbox": [ + 78, + 342, + 135, + 353 + ], + "type": "text", + "content": "working_dir" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 354, + 153, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 354, + 153, + 366 + ], + "spans": [ + { + "bbox": [ + 79, + 354, + 153, + 366 + ], + "type": "text", 
+ "content": "" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 367, + 532, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 367, + 532, + 392 + ], + "spans": [ + { + "bbox": [ + 78, + 367, + 532, + 392 + ], + "type": "text", + "content": "I've uploaded a python code repository in the directory working_dir. Consider the following PR description:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 393, + 312, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 393, + 312, + 406 + ], + "spans": [ + { + "bbox": [ + 79, + 393, + 312, + 406 + ], + "type": "text", + "content": " problem_statement " + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 406, + 531, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 406, + 531, + 430 + ], + "spans": [ + { + "bbox": [ + 78, + 406, + 531, + 430 + ], + "type": "text", + "content": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 78, + 432, + 522, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 432, + 522, + 444 + ], + "spans": [ + { + "bbox": [ + 78, + 432, + 522, + 444 + ], + "type": "text", + "content": "Your task is to make the minimal changes to the repository to ensure the jpr_description " + }, + { + "bbox": [ + 78, + 432, + 522, + 444 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 78, + 432, + 522, + 444 + ], + "type": "text", + "content": " is satisfied." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 78, + 445, + 250, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 445, + 250, + 456 + ], + "spans": [ + { + "bbox": [ + 78, + 445, + 250, + 456 + ], + "type": "text", + "content": "Follow these steps to resolve the issue:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 78, + 457, + 532, + 586 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 78, + 457, + 493, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 457, + 493, + 471 + ], + "spans": [ + { + "bbox": [ + 78, + 457, + 493, + 471 + ], + "type": "text", + "content": "1. As a first step, it might be a good idea to find and read code relevant to the " + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 78, + 471, + 532, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 471, + 532, + 495 + ], + "spans": [ + { + "bbox": [ + 78, + 471, + 532, + 495 + ], + "type": "text", + "content": "2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 78, + 497, + 311, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 497, + 311, + 509 + ], + "spans": [ + { + "bbox": [ + 78, + 497, + 311, + 509 + ], + "type": "text", + "content": "3. After finish the changes, revise the plan if needed." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 78, + 510, + 518, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 510, + 518, + 521 + ], + "spans": [ + { + "bbox": [ + 78, + 510, + 518, + 521 + ], + "type": "text", + "content": "4. With the new plan, make more changes, and continue the loop until necessary changes are made." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 78, + 522, + 532, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 522, + 532, + 548 + ], + "spans": [ + { + "bbox": [ + 78, + 522, + 532, + 548 + ], + "type": "text", + "content": "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 78, + 548, + 532, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 548, + 532, + 586 + ], + "spans": [ + { + "bbox": [ + 78, + 548, + 532, + 586 + ], + "type": "text", + "content": "6. Submit the changes when you think the changes are correct and the pr description is satisfied. Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 78, + 587, + 481, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 587, + 481, + 600 + ], + "spans": [ + { + "bbox": [ + 78, + 587, + 481, + 600 + ], + "type": "text", + "content": "The following are several pull request descriptions and their corresponding model patches:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 78, + 601, + 136, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 601, + 136, + 613 + ], + "spans": [ + { + "bbox": [ + 78, + 601, + 136, + 613 + ], + "type": "text", + "content": "Title: pr_title" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 79, + 613, + 145, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 613, + 145, + 625 + ], + "spans": [ + { + "bbox": [ + 79, + 613, + 145, + 625 + ], + "type": "text", + "content": "Body: pr_body" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 79, + 626, + 162, + 637 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 626, + 162, + 637 + ], + "spans": [ + { + "bbox": [ + 79, + 626, + 162, + 637 + ], + "type": "text", + "content": "File: file1Filename" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 79, + 639, + 161, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 639, + 161, + 650 + ], + "spans": [ + { + "bbox": [ + 79, + 639, + 161, + 650 + ], + "type": "text", + "content": "Status: file1.status" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 79, + 652, + 157, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 652, + 157, + 664 + ], + "spans": [ + { + "bbox": [ + 79, + 652, + 157, + 664 + ], + "type": "text", + "content": "Patch: file1.patch" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 79, + 665, + 334, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 665, + 334, + 677 + ], + "spans": [ + { + "bbox": [ + 79, + 665, + 334, + 677 + ], + "type": "text", + "content": "... 
(some more files and some more relevant pull requests)" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 68, + 694, + 541, + 720 + ], + "type": "text", + "content": "When the sleep-time compute is turned on, we first use the following prompt to ask the agent to explore the repository with all pull requests one by one:" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 110, + 434, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 110, + 434, + 124 + ], + "spans": [ + { + "bbox": [ + 77, + 110, + 434, + 124 + ], + "type": "text", + "content": "The following is a pull request description and its corresponding model patches:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 125, + 136, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 125, + 136, + 137 + ], + "spans": [ + { + "bbox": [ + 78, + 125, + 136, + 137 + ], + "type": "text", + "content": "Title: pr_title" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 138, + 145, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 138, + 145, + 150 + ], + "spans": [ + { + "bbox": [ + 79, + 138, + 145, + 150 + ], + "type": "text", + "content": "Body: pr_body" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 151, + 162, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 151, + 162, + 161 + ], + "spans": [ + { + "bbox": [ + 79, + 151, 
+ 162, + 161 + ], + "type": "text", + "content": "File: file1Filename" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 163, + 161, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 163, + 161, + 174 + ], + "spans": [ + { + "bbox": [ + 79, + 163, + 161, + 174 + ], + "type": "text", + "content": "Status: file1.status" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 176, + 158, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 176, + 158, + 189 + ], + "spans": [ + { + "bbox": [ + 79, + 176, + 158, + 189 + ], + "type": "text", + "content": "Patch: file1.patch" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 190, + 532, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 190, + 532, + 214 + ], + "spans": [ + { + "bbox": [ + 78, + 190, + 532, + 214 + ], + "type": "text", + "content": "Please read through the above information and try to understand the issue. You can explore the repo if needed. Summarize your understanding from the following perspectives:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 215, + 240, + 253 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 78, + 215, + 186, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 215, + 186, + 227 + ], + "spans": [ + { + "bbox": [ + 78, + 215, + 186, + 227 + ], + "type": "text", + "content": "1. The issue description." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 228, + 170, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 228, + 170, + 240 + ], + "spans": [ + { + "bbox": [ + 78, + 228, + 170, + 240 + ], + "type": "text", + "content": "2. The changed files." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 241, + 240, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 241, + 240, + 253 + ], + "spans": [ + { + "bbox": [ + 78, + 241, + 240, + 253 + ], + "type": "text", + "content": "3. How do these changed files work." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 274, + 541, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 274, + 541, + 300 + ], + "spans": [ + { + "bbox": [ + 68, + 274, + 541, + 300 + ], + "type": "text", + "content": "After exploring the repository with all relevant pull requests, we give the agent the following prompt as the final prompt to start working on the issue at test time:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 78, + 319, + 151, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 319, + 151, + 332 + ], + "spans": [ + { + "bbox": [ + 78, + 319, + 151, + 332 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 79, + 334, + 135, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 334, + 135, + 345 + ], + "spans": [ + { + "bbox": [ + 79, + 334, + 135, + 345 + ], + "type": "text", + "content": "working_dir" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 79, + 346, + 151, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 346, + 151, + 358 + ], + "spans": [ + { + "bbox": [ + 79, + 346, + 151, + 358 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 78, + 359, + 532, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 359, + 532, + 384 + ], + "spans": [ + { + "bbox": [ + 78, + 359, + 532, + 384 + ], + "type": "text", + "content": "I've uploaded a python code repository in the directory working_dir. 
Consider the following PR description:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 79, + 385, + 312, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 385, + 312, + 398 + ], + "spans": [ + { + "bbox": [ + 79, + 385, + 312, + 398 + ], + "type": "text", + "content": " problem_statement " + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 78, + 399, + 531, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 399, + 531, + 423 + ], + "spans": [ + { + "bbox": [ + 78, + 399, + 531, + 423 + ], + "type": "text", + "content": "Can you help me implement the necessary changes to the repository so that the requirements specified in the are met?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 78, + 424, + 521, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 424, + 521, + 436 + ], + "spans": [ + { + "bbox": [ + 78, + 424, + 521, + 436 + ], + "type": "text", + "content": "Your task is to make the minimal changes to the repository to ensure the ipr_description " + }, + { + "bbox": [ + 78, + 424, + 521, + 436 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 78, + 424, + 521, + 436 + ], + "type": "text", + "content": " is satisfied." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 78, + 437, + 250, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 437, + 250, + 449 + ], + "spans": [ + { + "bbox": [ + 78, + 437, + 250, + 449 + ], + "type": "text", + "content": "Follow these steps to resolve the issue:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 78, + 450, + 532, + 578 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 78, + 450, + 493, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 450, + 493, + 462 + ], + "spans": [ + { + "bbox": [ + 78, + 450, + 493, + 462 + ], + "type": "text", + "content": "1. 
As a first step, it might be a good idea to find and read code relevant to the " + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 78, + 463, + 532, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 463, + 532, + 487 + ], + "spans": [ + { + "bbox": [ + 78, + 463, + 532, + 487 + ], + "type": "text", + "content": "2. Plan your approach to modify the relevant files and implement the changes, and add new files if necessary." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 78, + 488, + 310, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 488, + 310, + 501 + ], + "spans": [ + { + "bbox": [ + 78, + 488, + 310, + 501 + ], + "type": "text", + "content": "3. After finish the changes, revise the plan if needed." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 78, + 502, + 517, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 502, + 517, + 514 + ], + "spans": [ + { + "bbox": [ + 78, + 502, + 517, + 514 + ], + "type": "text", + "content": "4. With the new plan, make more changes, and continue the loop until necessary changes are made." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 78, + 514, + 531, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 514, + 531, + 540 + ], + "spans": [ + { + "bbox": [ + 78, + 514, + 531, + 540 + ], + "type": "text", + "content": "5. Create some test scripts to verify the changes. If the test does not run through, you need to go back and revise the plan and make necessary changes." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 78, + 540, + 532, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 540, + 532, + 578 + ], + "spans": [ + { + "bbox": [ + 78, + 540, + 532, + 578 + ], + "type": "text", + "content": "6. Submit the changes when you think the changes are correct and the pr description is satisfied. 
Your thinking should be thorough and so it's fine if it's very long. Do not stop chaining or stop and send your thoughts to the user until you have resolved the issue." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 611, + 212, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 611, + 212, + 625 + ], + "spans": [ + { + "bbox": [ + 69, + 611, + 212, + 625 + ], + "type": "text", + "content": "I Context-Only Baseline" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 642, + 541, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 642, + 541, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 642, + 541, + 720 + ], + "type": "text", + "content": "To check that the questions in Stateful AIME and Stateful GSM-Symbolic are not trivially guessable, we compare sleep-time compute against a context-only baseline, which only provides the model with " + }, + { + "bbox": [ + 67, + 642, + 541, + 720 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 642, + 541, + 720 + ], + "type": "text", + "content": ", expecting the LLM to guess the most likely question and output the answer to whatever that question might be. We see on both Stateful AIME in Figure 22 and Stateful GSM-Symbolic in Figure 21 that sleep-time compute significantly outperforms the context-only baseline, demonstrating that the questions in our datasets are not trivially predictable from the context." 
+ } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 103, + 304, + 277 + ], + "blocks": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "lines": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "spans": [ + { + "bbox": [ + 72, + 103, + 304, + 277 + ], + "type": "image", + "image_path": "7c49fc1860cf4726aae396fb3b16cefca462e3d3421de890219a9abf10fa4854.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "lines": [ + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "spans": [ + { + "bbox": [ + 220, + 281, + 417, + 298 + ], + "type": "text", + "content": "--- gpt-4o-mini -gpt-4o + sleep-time compute -gpt-4o + sleep-time compute" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 105, + 538, + 277 + ], + "blocks": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "lines": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 105, + 538, + 277 + ], + "type": "image", + "image_path": "4a3b2122718609409e670ead9f1fca9f9e7f4d1d95fb41f646a7c558682e044a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 380, + 302, + 544 + ], + "blocks": [ + { + "bbox": [ + 67, + 315, + 541, + 368 + ], + "lines": [ + { + "bbox": [ + 67, + 315, + 541, + 368 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 541, + 368 + ], + "type": "text", + "content": "Figure 21: Context only baseline. 
Comparing the test-time compute vs. accuracy tradeoff on Stateful GSM-Symbolic, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful GSM-Symbolic cannot be trivially guessed." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 380, + 302, + 544 + ], + "lines": [ + { + "bbox": [ + 72, + 380, + 302, + 544 + ], + "spans": [ + { + "bbox": [ + 72, + 380, + 302, + 544 + ], + "type": "image", + "image_path": "d336d55b961c29c4f0543425946f3f412c63c56d08603713f4fdb923296bab48.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 552, + 380, + 561 + ], + "lines": [ + { + "bbox": [ + 229, + 552, + 380, + 561 + ], + "spans": [ + { + "bbox": [ + 229, + 552, + 380, + 561 + ], + "type": "text", + "content": "sleep-time compute ablate question" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 305, + 380, + 538, + 544 + ], + "blocks": [ + { + "bbox": [ + 305, + 380, + 538, + 544 + ], + "lines": [ + { + "bbox": [ + 305, + 380, + 538, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 380, + 538, + 544 + ], + "type": "image", + "image_path": "42b5d88223c081a31f33d680fd8b318742cc564016467a68f5f3bfb810e3ea80.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "lines": [ + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "type": "text", + "content": "Figure 22: Context only baseline. Comparing the test-time compute vs. accuracy tradeoff on Stateful AIME, for sleep-time compute verses the context only baseline (e.g. the model has to guess the most likely question to answer). 
We see that sleep-time compute significantly outperforms the context only baseline, demonstrating that the questions in Stateful AIME cannot be trivially guessed." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 652, + 242, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 652, + 242, + 665 + ], + "spans": [ + { + "bbox": [ + 67, + 652, + 242, + 665 + ], + "type": "text", + "content": "J Stateful AIME Construction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 681, + 541, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 541, + 721 + ], + "type": "text", + "content": "To construct the examples for Stateful AIME, we split each AIME 2024 and 2025 into a sequence of \"statements\", which correspond to punctuation separated stentences in the problem. Similar to how we construct Stateful GSM-Symbolic, we use all but the last statement as the context, and the final statement as the query." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 105, + 541, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 105, + 541, + 159 + ], + "spans": [ + { + "bbox": [ + 67, + 105, + 541, + 159 + ], + "type": "text", + "content": "There are a couple of edge cases where the question is posed in e.g. the second to last statement rather than the last statement. 
In these cases, we manually rearrange the statements to ensure the query being used corresponds to the question. In a few cases, there is only one statement in the problem. In these cases, the context is empty." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 163, + 541, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 163, + 541, + 216 + ], + "spans": [ + { + "bbox": [ + 67, + 163, + 541, + 216 + ], + "type": "text", + "content": "AIME includes a latex representation of figures. However, these latex figures can leak information about the answer: for example, these latex figures can contain exact information about the lengths of the sides in a geometry problem, giving away the answer. In these cases we first ensure that the problem is solvable without the figure and then manually strip the figure latex from the problem context." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 232, + 225, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 232, + 225, + 247 + ], + "spans": [ + { + "bbox": [ + 69, + 232, + 225, + 247 + ], + "type": "text", + "content": "K Implementation Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 258, + 541, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 541, + 324 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 541, + 324 + ], + "type": "text", + "content": "We implement sleep-time compute via function calling. When applying sleep-time compute, the model is given access to two functions, rethink_memory and finish_rethinking. The rethink_memory function takes as input a new string, and replaces the current context " + }, + { + "bbox": [ + 67, + 258, + 541, + 324 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 258, + 541, + 324 + ], + "type": "text", + "content": " and replaces the current context with the new string. 
The finish_rethinking function terminates the sleep-time compute process. The model is allowed to call the function rethink_memory for up to 10 times." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 341, + 239, + 356 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 341, + 239, + 356 + ], + "spans": [ + { + "bbox": [ + 69, + 341, + 239, + 356 + ], + "type": "text", + "content": "L AIME main results by year" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 369, + 365, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 369, + 365, + 384 + ], + "spans": [ + { + "bbox": [ + 69, + 369, + 365, + 384 + ], + "type": "text", + "content": "M AIME sleep-time compute scaling results by year" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 230, + 302, + 388 + ], + "blocks": [ + { + "bbox": [ + 76, + 230, + 302, + 388 + ], + "lines": [ + { + "bbox": [ + 76, + 230, + 302, + 388 + ], + "spans": [ + { + "bbox": [ + 76, + 230, + 302, + 388 + ], + "type": "image", + "image_path": "e816c91a3c6187996c99a18f835d16a3079514647bbc0b5fb838612cb818f21b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 304, + 230, + 538, + 388 + ], + "blocks": [ + { + "bbox": [ + 304, + 230, + 538, + 388 + ], + "lines": [ + { + "bbox": [ + 304, + 230, + 538, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 230, + 538, + 388 + ], + "type": "image", + "image_path": "5627d367118cc64f30b707528e2671712e917d15271f04c9dffee4066dae1d1b.jpg" + 
} + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 389, + 302, + 544 + ], + "blocks": [ + { + "bbox": [ + 72, + 389, + 302, + 544 + ], + "lines": [ + { + "bbox": [ + 72, + 389, + 302, + 544 + ], + "spans": [ + { + "bbox": [ + 72, + 389, + 302, + 544 + ], + "type": "image", + "image_path": "b08a8856c41c5c9681b412fb0f19ff0e6b12ba98d9d6f867f494f1eb06328d0f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "lines": [ + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "spans": [ + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "type": "text", + "content": "sleep-time compute test-time compute only" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 229, + 578, + 382, + 591 + ], + "lines": [ + { + "bbox": [ + 229, + 578, + 382, + 591 + ], + "spans": [ + { + "bbox": [ + 229, + 578, + 382, + 591 + ], + "type": "text", + "content": "Figure 23: AIME 2024 main result" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 389, + 533, + 544 + ], + "blocks": [ + { + "bbox": [ + 306, + 389, + 533, + 544 + ], + "lines": [ + { + "bbox": [ + 306, + 389, + 533, + 544 + ], + "spans": [ + { + "bbox": [ + 306, + 389, + 533, + 544 + ], + "type": "image", + "image_path": "55a6225f99256e8f322e14fb27e1459419935b2a5d814983019269649c2c60dc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 311, + 748 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { 
+ "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 230, + 304, + 387 + ], + "blocks": [ + { + "bbox": [ + 74, + 230, + 304, + 387 + ], + "lines": [ + { + "bbox": [ + 74, + 230, + 304, + 387 + ], + "spans": [ + { + "bbox": [ + 74, + 230, + 304, + 387 + ], + "type": "image", + "image_path": "4355e716102865f18267185823de1b7bd43061fb7935bf0fefbe8728ed5ee4e9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 230, + 538, + 387 + ], + "blocks": [ + { + "bbox": [ + 306, + 230, + 538, + 387 + ], + "lines": [ + { + "bbox": [ + 306, + 230, + 538, + 387 + ], + "spans": [ + { + "bbox": [ + 306, + 230, + 538, + 387 + ], + "type": "image", + "image_path": "022b519800028ce9d2e43181e8b5184a11ccf19588674da7cbc008c23a443d6f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 396, + 304, + 544 + ], + "blocks": [ + { + "bbox": [ + 133, + 388, + 263, + 396 + ], + "lines": [ + { + "bbox": [ + 133, + 388, + 263, + 396 + ], + "spans": [ + { + "bbox": [ + 133, + 388, + 263, + 396 + ], + "type": "text", + "content": "Claude 3.7 Sonnet - Stateful-AIME 2025" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 396, + 304, + 544 + ], + "lines": [ + { + "bbox": [ + 72, + 396, + 304, + 544 + ], + "spans": [ + { + "bbox": [ + 72, + 396, + 304, + 544 + ], + "type": "image", + "image_path": "5d9becf82727625f8d60269b770eb0d59a2bac80bcbd6871633a0094098c0264.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "lines": [ + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "spans": [ + { + "bbox": [ + 219, + 551, + 392, + 562 + ], + "type": "text", + "content": "sleep-time compute test-time compute only" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": 
[ + 229, + 578, + 382, + 591 + ], + "lines": [ + { + "bbox": [ + 229, + 578, + 382, + 591 + ], + "spans": [ + { + "bbox": [ + 229, + 578, + 382, + 591 + ], + "type": "text", + "content": "Figure 24: AIME 2025 main result" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 396, + 537, + 544 + ], + "blocks": [ + { + "bbox": [ + 374, + 388, + 489, + 396 + ], + "lines": [ + { + "bbox": [ + 374, + 388, + 489, + 396 + ], + "spans": [ + { + "bbox": [ + 374, + 388, + 489, + 396 + ], + "type": "text", + "content": "DeepSeek R1 - Stateful-AIME 2025" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 396, + 537, + 544 + ], + "lines": [ + { + "bbox": [ + 307, + 396, + 537, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 537, + 544 + ], + "type": "image", + "image_path": "0c7f72828857b320e2b6cdab98b28bae4dcedf55a2f98faa94bf8371c064f765.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 312, + 748 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 140, + 304, + 336 + ], + "blocks": [ + { + "bbox": [ + 118, + 130, + 279, + 140 + ], + "lines": [ + { + "bbox": [ + 118, + 130, + 279, + 140 + ], + "spans": [ + { + "bbox": [ + 118, + 130, + 279, + 140 + ], + "type": "text", + "content": "o1 Sleep-Time Compute Stateful-AIME 2024" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 140, + 304, + 336 + ], + "lines": [ + { + "bbox": [ + 72, + 140, + 304, + 336 + ], + "spans": [ + { 
+ "bbox": [ + 72, + 140, + 304, + 336 + ], + "type": "image", + "image_path": "423c5c2086885577c8816a003c45597b0dfef0a2f0d6e1b80875cc14ecc19dbd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 347, + 474, + 355 + ], + "lines": [ + { + "bbox": [ + 201, + 347, + 474, + 355 + ], + "spans": [ + { + "bbox": [ + 201, + 347, + 474, + 355 + ], + "type": "text", + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 140, + 537, + 337 + ], + "blocks": [ + { + "bbox": [ + 343, + 130, + 521, + 140 + ], + "lines": [ + { + "bbox": [ + 343, + 130, + 521, + 140 + ], + "spans": [ + { + "bbox": [ + 343, + 130, + 521, + 140 + ], + "type": "text", + "content": "o3-mini Sleep-Time Compute Stateful-AIME 2024" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 140, + 537, + 337 + ], + "lines": [ + { + "bbox": [ + 306, + 140, + 537, + 337 + ], + "spans": [ + { + "bbox": [ + 306, + 140, + 537, + 337 + ], + "type": "image", + "image_path": "e334741bd46fe2c3ff5da946a226d193c1d06a423d439b476f8fe8aa29f0c0c6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 372, + 441, + 385 + ], + "lines": [ + { + "bbox": [ + 168, + 372, + 441, + 385 + ], + "spans": [ + { + "bbox": [ + 168, + 372, + 441, + 385 + ], + "type": "text", + "content": "Figure 25: Scaling sleep-time compute for Stateful AIME2024." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 72, + 446, + 307, + 643 + ], + "blocks": [ + { + "bbox": [ + 121, + 437, + 281, + 447 + ], + "lines": [ + { + "bbox": [ + 121, + 437, + 281, + 447 + ], + "spans": [ + { + "bbox": [ + 121, + 437, + 281, + 447 + ], + "type": "text", + "content": "o1 Sleep-Time Compute Stateful-AIME 2025" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 446, + 307, + 643 + ], + "lines": [ + { + "bbox": [ + 72, + 446, + 307, + 643 + ], + "spans": [ + { + "bbox": [ + 72, + 446, + 307, + 643 + ], + "type": "image", + "image_path": "8c1c0a04771af849fa0b995a499fded30b6c52677ec321b108aaf05556fb9e8f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 202, + 654, + 474, + 662 + ], + "lines": [ + { + "bbox": [ + 202, + 654, + 474, + 662 + ], + "spans": [ + { + "bbox": [ + 202, + 654, + 474, + 662 + ], + "type": "text", + "content": "low reasoning effort sleep-time medium reasoning effort sleep-time high reasoning effort sleep-time" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 170, + 679, + 440, + 693 + ], + "lines": [ + { + "bbox": [ + 170, + 679, + 440, + 693 + ], + "spans": [ + { + "bbox": [ + 170, + 679, + 440, + 693 + ], + "type": "text", + "content": "Figure 26: Scaling sleep-time compute on Stateful AIME2025" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 447, + 536, + 643 + ], + "blocks": [ + { + "bbox": [ + 343, + 437, + 522, + 447 + ], + "lines": [ + { + "bbox": [ + 343, + 437, + 522, + 447 + ], + "spans": [ + { + "bbox": [ + 343, + 437, + 522, + 447 + ], + "type": "text", + "content": "o3-mini Sleep-Time Compute Stateful-AIME 2025" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + 
"bbox": [ + 310, + 447, + 536, + 643 + ], + "lines": [ + { + "bbox": [ + 310, + 447, + 536, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 536, + 643 + ], + "type": "image", + "image_path": "d61f6b0c6f5310ee2fa005c94e8a0e598a5669d25106ab75c7f0c56923ff9020.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "spans": [ + { + "bbox": [ + 299, + 738, + 310, + 748 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_content_list.json b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c9161fe23cd0b411f99f7caa73bafccfbe24b0c5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_content_list.json @@ -0,0 +1,3589 @@ +[ + { + "type": "text", + "text": "It’s All Connected: A Journey Through Test-Time Memorization, Attentional Bias, Retention, and Online Optimization", + "text_level": 1, + "bbox": [ + 142, + 137, + 890, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ali Behrouz†, Meisam Razaviyayn†, Peilin Zhong†, and Vahab Mirrokni†", + "bbox": [ + 225, + 214, + 803, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Google Research", + "bbox": [ + 444, + 253, + 588, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{alibehrouz, Razaviyayn, peilinz, mirrokni}@google.com", + "bbox": [ + 318, + 276, + 718, + 290 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 
485, + 321, + 545, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Designing efficient and effective architectural backbones has been in the core of research efforts to enhance the capability of foundation models. Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we reconceptualize neural architectures, including Transformers, Titans, and modern linear recurrent neural networks as associative memory modules that learn a mapping of keys and values using an internal objective, referred to as attentional bias. Surprisingly, we observed that most existing sequence models leverage either (1) dot-product similarity, or (2) $\\ell_2$ regression objectives as their attentional bias. Going beyond these objectives, we present a set of alternative attentional bias configurations along with their effective approximations to stabilize their training procedure. We then reinterpret forgetting mechanisms in modern deep learning architectures as a form of retention regularization, providing a novel set of forget gates for sequence models. Building upon these insights, we present MIRAS, a general framework to design deep learning architectures based on four choices of: (i) associative memory architecture, (ii) attentional bias objective, (iii) retention gate, and (iv) memory learning algorithm. We present three novel sequence models—MONETA, YAAD, and MEMORA—that go beyond the power of existing linear RNNs while maintaining a fast parallelizable training process. Our experiments show different design choices in MIRAS yield models with varying strengths. 
For example, certain instances of MIRAS achieve exceptional performance in special tasks such as language modeling, commonsense reasoning, and recall intensive tasks, even outperforming Transformers and other modern linear recurrent models.", + "bbox": [ + 151, + 342, + 879, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 587, + 290, + 604 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Designing efficient architectural backbones for sequence modeling is a key to enhance the capability of foundation models in domains ranging from language (Behrouz et al. 2024c; Vaswani et al. 2017a) and computer vision (Dosovitskiy et al. 2020) to computational biology (Wang et al. 2024) and neuroscience (Behrouz et al. 2024a). While Transformers (Vaswani et al. 2017a), mainly due to their in-context learning and ability to learn at scale (Kaplan et al. 2020), have been firmly established as state-of-the-art (SOTA) models in sequence modeling, their quadratic time and space complexity limits their applicability in tasks that require long context modeling (Dalal et al. 2025; Li et al. 2024a; Liu et al. 2024b).", + "bbox": [ + 109, + 617, + 919, + 709 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent efforts aim to overcome Transformer limitations in long-context modeling by designing efficient recurrent alternatives (Behrouz et al. 2024c; Neil et al. 2017; Smith et al. 2022). Unlike Transformer's linearly growing memory (i.e., the KV cache), these models compress the context into a fixed size memory, demanding improved memory management for comparable performance. To design more effective architectures, studies focus on improving memory capacity and its management by using/designing more expressive: (1) Learning rules: from Hebbian rule (Hebb 2005) to Delta rule (Neil et al. 2017); (2) Forget gates: from LSTM's (Schmidhuber et al. 1997) to Mamba2's (Dao et al. 
2024) and then Titan's forget gates (Behrouz et al. 2024c); and (3) More expressive memory architectures: from vector-valued memory in RetNet (Sun et al. 2023) and LRU (Orvieto et al. 2023) to neural deep memory in Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024).", + "bbox": [ + 109, + 715, + 921, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "At the core of these advancements lies a critical question: \"what is the underlying design framework behind these sequence models, and how can these models be enhanced?\" Taking inspiration from the broad definitions of associative memory and learning in neuropsychology literature (Okano et al. 2000), several studies discuss the connection between Transformers", + "bbox": [ + 109, + 859, + 919, + 905 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13173v1 [cs.LG] 17 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 511, + 936, + 521, + 946 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "and (linear) Recurrent Neural Networks (RNNs) with associative memory (Bietti et al. 2023; Hopfield 1982; Ramsauer et al. 2021). These studies, however, either: (1) lack a universal explanation to fully illustrate the underlying learning algorithms, (2) are limited to a specific definition of associative memory and lack generalizability, and/or (3) are unable to describe standard, widely used components such as forget gate.", + "bbox": [ + 111, + 92, + 919, + 156 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions. Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we re-conceptualize neural architectures, including Transformers, Titans, and other modern linear recurrent neural networks based on a broad definition of associative memory with attentional bias. 
We define and formalize the concept of attentional bias as the internal memory objective of sequence models (see Section 3) that aims to learn the underlying mapping between inputs (i.e., keys and values). Our formulation reveals that almost all existing sequence models are associative memories that leverage the same type of attentional bias. We reinterpret existing forgetting mechanisms in modern deep learning architectures as a form of retention $\\ell_2$ -regularization for the attentional bias, and then provide a novel set of alternative retention gates (forget gate) for sequence models, providing new insights on how to balance learning new concepts and the retention of previously learned concepts.", + "bbox": [ + 109, + 166, + 921, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building upon our formulation of memory and forget gate, we present MIRAs1, a fundamental framework to design novel sequence modeling architectures by four choice of: (1) Attentional bias (i.e., memory objective), (2) Retention gate, (3) Memory architecture, and (4) Memory learning algorithm (i.e., optimizer). We motivate and discuss several novel design choices, leading to novel architectures beyond existing sequence modeling architectures.", + "bbox": [ + 111, + 309, + 921, + 371 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, we focus on three novel variants of MIRAS-MONETA, YAAD, and MEMORA—that are based on attentional biases beyond simple $\\ell_2$ -regression objective as well as novel retention gating mechanisms that are more robust than existing ones. We further perform experimental evaluations of these three variants on language modeling, common-sense reasoning, needle-in-haystack, and recall intensive tasks. The results illustrate the superior performance of these variants, outperforming state-of-the-art sequence models.", + "bbox": [ + 111, + 377, + 921, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Roadmap. 
In Section 2, we review literature and discuss relevant concepts that we use through the paper. In Section 3, we present and discuss the broad definition of associative memory with formally defining the concept of attentional bias. We then discuss two viewpoints—Learning-Retaining and Follow-the-Regularized-Leader (FTRL)—to interpret sequence modeling through the lens of optimization and prove the generality of Learning-Retaining over FTRL. In Section 4, we present our MIRAS framework and discuss how it unifies modern sequence models. In Section 5, to show the potential of MIRAS framework, we discuss a variety of novel design choices for (1) attentional bias, and (2) retention gate (forget gate). Later in Section 5.3, we present three novel sequence models as the variants of MIRAS, and then discuss how to train them in a parallelizable manner. Finally, our experimental evaluations are reported in Section 6.", + "bbox": [ + 111, + 465, + 921, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Preliminaries and Background", + "text_level": 1, + "bbox": [ + 112, + 609, + 480, + 630 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we review the related studies and background concepts that we use through the paper.", + "bbox": [ + 111, + 641, + 795, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Attention. Attention as the backbone of Transformers is a critical component that acts as their associative memory (Bietti et al. 2023). 
Given input $x \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}}$ , causal attention computes output $y \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}}$ based on Softmax over input dependent key, value, and query matrices:", + "bbox": [ + 109, + 669, + 919, + 715 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = x \\mathbf {W} _ {\\mathrm {Q}}, \\quad \\mathbf {K} = x \\mathbf {W} _ {\\mathrm {K}}, \\quad \\mathbf {V} = x \\mathbf {W} _ {\\mathrm {V}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 724, + 919, + 739 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {i} = \\sum_ {j = 1} ^ {i} \\frac {\\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {j} / \\sqrt {d _ {\\mathrm {i n}}}\\right) \\mathbf {v} _ {j}}{\\sum_ {\\ell = 1} ^ {i} \\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {\\ell} / \\sqrt {d _ {\\mathrm {i n}}}\\right)}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 743, + 919, + 794 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\mathbf{W}_{\\mathrm{Q}}, \\mathbf{W}_{\\mathrm{K}}$ , and $\\mathbf{W}_{\\mathrm{V}} \\in \\mathbb{R}^{d_{\\mathrm{in}} \\times d_{\\mathrm{in}}}$ are learnable parameters. While Transformers achieve significant improvements compared to traditional Recurrent Neural Networks (RNNs)—such as LSTM (Schmidhuber et al. 1997), their complexity that requires at least $N \\times d$ operators to calculate the output has been the main motivation for researchers to think about alternative architectures. We divide and review the research efforts to design alternative architectures into two groups: (1) Linear shallow memory recurrent models, (2) Deep memory modules.", + "bbox": [ + 109, + 804, + 919, + 881 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1 \"Miras\" is the translation of \"Legacy\" in several languages: such as Persian, Arabic, and Turkish. 
We choose this name since this framework provides clear steps for future studies to design powerful sequence models based on their task at hand.", + "bbox": [ + 111, + 887, + 919, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 511, + 936, + 522, + 948 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Associative Memory", + "text_level": 1, + "bbox": [ + 421, + 95, + 614, + 113 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Memory Architecture", + "text_level": 1, + "bbox": [ + 148, + 125, + 292, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The neural architecture that stores memories.", + "bbox": [ + 148, + 145, + 292, + 165 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Vector", + "2.Matrix", + "3. Multilayer Perceptron (MLP)", + "4. Memory Mosaics" + ], + "bbox": [ + 127, + 175, + 290, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Attentional Bias", + "text_level": 1, + "bbox": [ + 362, + 125, + 470, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The memory internal objective.", + "bbox": [ + 336, + 145, + 496, + 156 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. $\\ell_p$ Regression Loss", + "2. Dot Product Similarity", + "3. Huber Loss", + "4. KL-Divergence" + ], + "bbox": [ + 320, + 174, + 455, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Retention Gate", + "text_level": 1, + "bbox": [ + 560, + 125, + 661, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The gate to retain the past state of the memory.", + "bbox": [ + 524, + 145, + 700, + 166 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. $\\ell_p$ Regularization (Local or Global)", + "2. Elastic Net Regularization", + "3. KL Divergence", + "4. 
Bregman Divergence" + ], + "bbox": [ + 517, + 175, + 707, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Memory Algorithm", + "text_level": 1, + "bbox": [ + 746, + 125, + 875, + 137 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The algorithm that learns the mapping.", + "bbox": [ + 736, + 146, + 885, + 169 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Gradient Descent (GD)", + "2. GD with Momentum", + "3. Newton's Method", + "4. Non-parametric Solutions ..." + ], + "bbox": [ + 714, + 174, + 864, + 258 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg", + "image_caption": [ + "Associative Memory is a neural network that learns to map keys to values based on an Attentional Bias objective.", + "Figure 1: The overview of MIRAS framework. MIRAS is based on four critical choices of (1) memory architecture, (2) attentional bias, (3) retention gate, and (4) memory learning algorithm. In this framework, the memory architecture determines the model capacity to memorize; attentional bias is responsible for modeling the underlying mapping patterns; retention gate determines how to balance learning new concepts and the retention of previously learned concepts; and memory learning algorithm is responsible for memory management." + ], + "image_footnote": [], + "bbox": [ + 116, + 287, + 916, + 401 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(Linear) Recurrent Models. For many years, non-linear (gated) recurrent neural networks had been the de facto architectural backbones in deep learning (Greff et al. 2016). Their recurrent nature, however, results in non-parallelizable training, making their large scale training infeasible. 
To this end, in recent years, linear RNNs as alternatives to both Transformers and non-linear RNNs have attracted much attention, mainly due to their parallelizable and linear-time training while maintaining competitive performance (Peng et al. 2025a; Sun et al. 2023; Yang et al. 2024c).
2024).", + "bbox": [ + 109, + 705, + 919, + 796 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address the above mentioned limitations, recurrent models that use a matrix-valued memory with Delta learning rule has gained popularity in recent years (Neil et al. 2017; Schlag et al. 2021; Yang et al. 2024c). Despite significant advantages, even these delta-rule-based recurrent models face theoretical limitations (Irie et al. 2023) with moderate performance in practice (Yang et al. 2024c). Recently, several studies aim to improve the performance of such models by adding scalar or channel-wise forget gate mechanisms (Peng et al. 2025b; Yang et al. 2024a), using negative eigenvalues (Grazzi et al. 2024), and multiple learning steps (Siems et al. 2025). They, however, still suffer from performance drop in long context, mainly due to the less expressive memory architectures (Behrouz et al. 2024c).", + "bbox": [ + 109, + 804, + 921, + 910 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 511, + 936, + 521, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/c2653b2e0de174c1d9cc2bb7d0c9c6da0cdcf0959473a141d4ed1459d62c3830.jpg", + "table_caption": [ + "Table 1: Overview of recent sequence models in MIRAS framework perspective. Surprisingly, all models are using the same type of attentional bias and regularization (forget gate). Note that these architectural choices does not uniquely identify the backbone as there are other design choices (e.g., input-dependency, channel-wise parameters, etc.) as well as the use of other components such as attention, convolutions, etc. Note that for attentional bias and retention gate, we are referring to the original design of MIRAS, discussed in Equation 4 and Remark 1." + ], + "table_footnote": [ + "* is using multiple rounds of GD per token.", + "For the sake of clarity, we use L2 for all modified L2-like regularizations. 
However, in fact, only Titans and RWKV-7 are using L2 retention gate (see Section 4)" + ], + "table_body": "
ModelMemory ArchitectureAttentional BiasRetention Gate†Memory AlgorithmMemory Write Operation
Shallow Memory
RetNet (2023)VectorDot-ProductL2GDMt=αMt-1+vtktT
Transformer (2017)MatrixL2-NonparametricMt=Mt-1∪{kt, vt}
LA (2021)MatrixDot-Product-GDMt=Mt-1+vtktT
DFWMatrixDot-ProductL2GDMt=(βtαT) ⊙ Mt-1+vtktT
Lightning Attention (2025)
GLA (2024)MatrixDot-ProductL2GDMt=Diag(αt)Mt-1+vtktT
Mamba (2024)MatrixDot-ProductL2GDMt=αMt-1+vtktT
HGRN2 (2024)MatrixL1L2GDMt=Diag(αt)Mt-1+vt(1-αt)T
DeltaNet (2017)MatrixL2-GDMt=(I-βtktkT)Mt-1+βtvtktT
Longhorn (2024)MatrixL2-Implicit GDMt=(I-βtktkT)Mt-1+(βt1+ktkβt)xtkT
TTT-Linear (2024)MatrixL2-GDMt=Mt-1-η∇L(Mt-1, xt)
Gated DeltaNet (2024)MatrixL2L2GDMt=(αt(I-βtktkT))Mt-1+βtvtktT
RWKV-7 (2025)MatrixL2L2GDMt=diag(αt)(I-βtktkT)Mt-1+βtvtktT
DeltaProduct (2025)MatrixL2L2MGD*Mt=(αtΠi=1n(I-βt,ikt,i)T)Mt-1+Σj=1nΠi=j(I-βt,ivtj,kj,i)
Deep Memory
TTT-MLP (2024)2-layer MLPL2-GDMt=Mt-1-η∇L(Mt-1;kt, vt)
Titans-LMM (2024)k-layer MLPL2L2GD + MomentumMt=αMt-1-St, where St=ηSt-1-θt∇L(Mt-1;kt, vt)
MONETA (ours)2-layer MLPLpLqGDAt=AtA1-ηt∇lp(Wt-1;kt, vt), Wt=At/||At||q-2
YAAD (ours)2-layer MLPHuberL2GDWt=atWt-1-(ηt∇ε2(Wt-1;kt, vt) if ||M(kt)-vt|≤δt, ηtδt∇ε1(Wt-1;kt, vt) Otherwise.
MEMORA (ours)2-layer MLPL2KLGDWt=Softmax(αt log(Wt-1)-ηt∇ε2(Wt-1;kt, vt))
", + "bbox": [ + 116, + 176, + 916, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Deep Memory Module: Titans and Test Time Training. To overcome the limited memory and to extend the effective context length of deep sequence models, more recent studies focus on a new generation of architectures with deep memory module (Behrouz et al. 2024c; Sun et al. 2024). These architectures are built on the meta-learning perspective, where the memory is an MLP architecture that is updated using gradient descent (with momentum) (Behrouz et al. 2024c; Sun et al. 2024). Sun et al. (2024) further provide a unifying perspective that how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models. Recently, in a concurrent work to ours, Wang et al. (2025) show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss. It, however, still remains unanswered that \"What is the underlying design framework behind these sequence models that can accurately unify existing architectures?\" Moreover, the role of forget gates and its alternative choices in modern sequence models is surprisingly less explored.", + "bbox": [ + 109, + 551, + 919, + 719 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Associative Memory, Attentional Bias, and Retention", + "text_level": 1, + "bbox": [ + 109, + 739, + 718, + 761 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Associative memory, which is an inseparable component of learning in humans (Terry 2017), has been the inspiration for many artificial neural architectures in the literature (Behrouz et al. 2024c; Hopfield 1982; Neil et al. 2017). 
These studies, however, define instances of the concept of associative memory, limiting the architecture to a specific class of similarity metrics between entities (i.e., keys and values). That is, broadly speaking, associative memory is an operator that maps a set of keys $K$ to a set of values $V$ , and so to learn the underlying mapping patterns in data, it requires an objective that targets a type of memory and measures the quality of learned mappings:", + "bbox": [ + 109, + 771, + 919, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Definition 3.1 (Associative Memory and Attentional Bias). Given a set of keys $\\mathcal{K} \\subseteq \\mathbb{R}^{d_k}$ and values $\\mathcal{V} \\subseteq \\mathbb{R}^{d_o}$ , associative memory is an operator $\\mathcal{M}: \\mathcal{K} \\to \\mathcal{V}$ . Learning the mapping of associative memory is based on an objective $\\mathcal{L}$ , called", + "bbox": [ + 109, + 869, + 919, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 511, + 936, + 522, + 948 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Attentional Bias, that determines the type of memory and its tendency to prioritize some events:", + "bbox": [ + 112, + 92, + 758, + 108 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {M} ^ {*} = \\arg \\min _ {\\mathcal {M}} \\quad \\mathcal {L} (\\mathcal {M} (\\mathcal {K}); \\mathcal {V}). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 119, + 919, + 141 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A few remarks are in order:", + "bbox": [ + 112, + 160, + 303, + 174 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark 1. When we parameterize the memory with parameter $W$ , we use $\\mathcal{M}(W, \\mathbf{k})$ . In this parametric setting, the optimization problem in (4) should be performed over the parameter $W$ . 
Furthermore, in the parametric setup, we might use an additional regularization $\\mathcal{R}(W)$ to control the retaining of the past data.", + "bbox": [ + 111, + 183, + 919, + 229 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark 2. Learning the mapping between keys and values (Equation 4) is a meta-learning problem, in which the attentional bias is optimized in the inner-loop and all other parameters of the neural network (e.g., linear projections, convolutions, etc.) are optimized in the outer-loop. Therefore, the model learns how to store the data into its parameters at test time (Behrouz et al. 2024c; Sun et al. 2024).", + "bbox": [ + 111, + 236, + 921, + 296 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Learning to Memorize and to Retain Through the Lens of Optimization", + "text_level": 1, + "bbox": [ + 111, + 315, + 790, + 333 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Definition 3.1 translates the design of a neural architecture based on the concept of associative memory to learning the underlying mapping between keys and values, by minimizing an objective $\\mathcal{L}$ . To optimize Equation 4, one simple approach is to utilize the idea of gradient descent. Specifically, given a new pair of keys and values, we update the memory as:", + "bbox": [ + 111, + 339, + 919, + 385 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 397, + 919, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, for simplicity, we use the definition $\\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\coloneqq \\mathcal{L}(\\mathcal{M}(W; \\mathbf{k}_t), \\mathbf{v}_t)$ . Behrouz et al. 
(2024c) re-interpret the formulation as a momentary surprise metric, where the model memorizes tokens that violate the expectation of the objective (i.e., being surprising to the memory).
Specifically, assuming $W_0 = 0$ , the update rule in (5) is equivalent to", + "bbox": [ + 111, + 599, + 919, + 645 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\left\\langle W - W _ {i - 1}, \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) \\right\\rangle + \\frac {1}{2 \\eta} \\| W \\| _ {2} ^ {2}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 657, + 919, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the term $\\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle$ is the local linear approximation of the original loss at time $i$ and the second term is a regularization term. While the first part $\\sum_{i=1}^{t} \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle$ measures how well can the memory learn all the past tokens, the second term $\\frac{1}{2\\eta} \\|W\\|_2^2$ penalizes the memory update with respect to the size of memory.", + "bbox": [ + 111, + 705, + 918, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Equation (7) uses linear approximation of the loss function and quadratic regularization. We can, however, in principle use other approximations of the loss function as well as other regularization functions, as used in the past in online optimization (Hazan et al. 2016; Shalev-Shwartz et al. 2012) or in general optimization (Miral 2015; Razaviyayn et al. 2013). Such changes are the idea behind the development of other optimization algorithms such mirror descent. 
More specifically, we can generalize the update rule in (7) to the form:", + "bbox": [ + 111, + 760, + 921, + 835 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W ; \\mathbf {k} _ {i} , \\mathbf {v} _ {i})} _ {\\text {A t t e n t i o n a l B i a s}} + \\underbrace {\\frac {1}{\\eta_ {t}} \\mathcal {R} _ {t} (W)} _ {\\text {M e m o r y S t a b i l i t y}}. \\tag {FTRLViewpoint}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 854, + 919, + 916 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 511, + 936, + 521, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this update rule, the term $\\sum_{i=1}^{t} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i)$ aims at memorizing the tokens at test time, while the term $\\mathcal{R}_t(W)$ regularizes the learning dynamics and take the size of the memory into account when updating it by a new incoming data. Choosing different loss functions $\\widehat{\\ell}_i(W; x_i)$ and the regularization term $\\frac{1}{\\eta_t} \\mathcal{R}_t(W)$ can lead to different algorithms such as (online) gradient descent or mirror descent. In this generalization, $\\eta_t$ to can be data-dependent. Moreover, we will allow imposing constraint $\\mathcal{W}$ on the choice $W$ .", + "bbox": [ + 109, + 90, + 919, + 171 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 Viewpoint 2: Learning the Latest Token While Retaining Previous Information", + "text_level": 1, + "bbox": [ + 111, + 186, + 859, + 205 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Another way to interpret the update rule (5) is to view it as learning from the latest key-value pair $(\\mathbf{k}_i, \\mathbf{v}_i)$ (via using its gradient or surprise metric), while staying close to the previous state $W_{t-1}$ to retain the previously memorized tokens. 
Formally, (5) is equivalent to", + "bbox": [ + 109, + 212, + 919, + 258 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\right\\rangle + \\frac {1}{2 \\eta_ {t}} \\left\\| W - W _ {t - 1} \\right\\| _ {2} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 268, + 730, + 299 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The first term locally approximates $\\ell(W; \\mathbf{k}_t, \\mathbf{v}_t)$ around the previous state $W_{t-1}$ , while the last term regularizes deviations from $W_{t-1}$ . This form can generalize to", + "bbox": [ + 109, + 310, + 918, + 340 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\widetilde {\\ell_ {t}} (W ; \\mathbf {k} _ {t} , \\mathbf {v} _ {t})} _ {\\text {A t t e n t i o n a l B i a s}} + \\underbrace {\\operatorname {R e t} _ {t} (W , W _ {t - 1})} _ {\\text {R e t e n t i o n}}, \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\text {(L e a r n i n g - R e t a i n i n g V i e w p o i n t)}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 373, + 919, + 414 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where the term $\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)$ is an approximation of $\\ell (W;\\mathbf{k}_t,\\mathbf{v}_t)$ and minimizing it corresponds to Learning from the new concepts $(\\mathbf{k}_t,\\mathbf{v}_t)$ . The second term $\\mathrm{Ret}_t(W,W_{t - 1})$ regularizes the changes in $W$ to make the learning dynamics stable and to retain previously learned knowledge. 
This Retention function may have local and global components:", + "bbox": [ + 109, + 424, + 919, + 470 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e t} _ {t} \\left(W, W _ {t - 1}\\right) = \\underbrace {\\frac {1}{\\eta_ {t}} \\mathrm {D} _ {t} \\left(W , W _ {t - 1}\\right)} _ {\\text {L o c a l R e t e n t i o n}} + \\underbrace {\\frac {1}{\\alpha_ {t}} \\mathrm {G} _ {t} \\left(W\\right)} _ {\\text {G l o b a l R e t e n t i o n}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 481, + 679, + 536 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, the term $\\mathrm{D}_t(W, W_{t-1})$ , which is a premetric that controls the deviations from $W_{t-1}$ , aims at retaining previously learned knowledge. The coefficient $\\eta_t$ can be viewed as a meta in-context learning rate, where larger values of $\\eta_t$ leads to learning more from new concepts, while allowing higher forgetting of previously learned concepts. The second term is a global retention that controls the change of the memory with respect to its size. The special instances of the above viewpoint (e.g., without global retention, with implicit closed-form solution, and/or with limited memory structure) have been the motivation behind some of the recent studies such as Liu et al. (2024a).", + "bbox": [ + 109, + 545, + 919, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 Further Discussions on the Two Viewpoints", + "text_level": 1, + "bbox": [ + 112, + 654, + 547, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The (FTRL Viewpoint) and (Learning-Retaining Viewpoint) are connected through the lens of online optimization. For example, as discussed above, by choosing linear approximation of the loss and quadratic regularization/retention, they can both cover online gradient descent update in (5) as a special case. 
One straightforward way to make the connection explicit is by defining the premetric $\\mathrm{D}_t(W;W^{\\prime})$ based on the previous loss functions and the regularization, as described in Proposition 3.2 below:", + "bbox": [ + 109, + 679, + 919, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Proposition 3.2. Let $\\eta_t = \\eta$ and define $h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)$ . Assume $\\mathcal{W} = \\mathbb{R}^d$ and the function $h_t(W)$ is strictly convex in $W$ and let $\\mathcal{D}_h(\\cdot, \\cdot)$ be the Bregman divergence defined by function $h(\\cdot)$ , i.e., $\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle$ . Set $Ret_t(W, W') = \\mathcal{D}_h(W, W')$ and $\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)$ in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint).", + "bbox": [ + 111, + 761, + 919, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We provide the proof in Appendix B. The above proposition shows that (Learning-Retaining Viewpoint) can also explain the approaches obtained by (FTRL Viewpoint), under some mild assumptions. Hence, (Learning-Retaining Viewpoint) may be seen as a more general version. This is why we focus on this viewpoint in most of our derivations in the next sections.", + "bbox": [ + 109, + 830, + 919, + 891 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 511, + 936, + 521, + 948 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remark 3. Given the above viewpoint, we can see that even by using additional global regularization there is no memory erasing or forgetting process (a common term in modern architectures (Behrouz et al. 2024c; Yang et al. 2024a)) but the model might decide to not retain the past state of the memory. 
Interestingly, this observation also matches the human memory process, where the brain does not erase memories, but they might become inaccessible due to retrieval failures (Robertson 2002).
Therefore, the first choice to design a sequence model is the structure of the memory. Given the structure of the memory, parameterized by a set of parameters $W$ , as discussed earlier, we aim to minimize a loss function $\\ell(W; \\cdot, \\cdot)$ with a retention regularizer $\\mathrm{Ret}(\\cdot)$ via a learning algorithm (e.g., gradient descent). Accordingly, MIRAs requires four design choices:", + "bbox": [ + 111, + 319, + 921, + 455 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Memory Structure: This choice specifies the architecture of the memory. For example, this architecture can be a vector, a linear function, a Multilayer Perceptron (MLP) layer, or even more complex structures. We may restrict the choice of $W$ to be within a certain region, e.g., $W$ to lie within an $L_{2}$ ball to avoid infinite values or unstable training.", + "2. Attentional Bias: A key choice is the attentional bias objective $\\mathcal{L}(\\cdot)$ in Equation 4. We can even consider different approximations of the loss function, (e.g., $\\widehat{\\ell} (\\cdot ,\\cdot)$ in (FTRL Viewpoint) or $\\widetilde{\\ell} (\\cdot ,\\cdot)$ in (Learning-Retaining Viewpoint)). The choice of attentional bias determines how memory memorizes the context, maps the inputs, and prioritizes the events.", + "3. Memory Stability and Retention: Another key choice is the retention regularizer $\\mathcal{R}(\\cdot)$ (e.g., $\\mathcal{R}_t(\\cdot)$ in (FTRL Viewpoint) and $\\mathrm{Ret}_t(\\cdot)$ in (Learning-Retaining Viewpoint)). In parametric setups, this choice balances learning with retention of past state. An effective retention gate is key to the good performance in long context tasks.", + "4. Memory Algorithm: Finally, this choice specifies the learning algorithm that we use to optimize the memory objective. One may use gradient descent, gradient descent with momentum, or any other algorithm (including finding non-parametric solutions)." 
+ ], + "bbox": [ + 117, + 462, + 921, + 666 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The above choices are major design choices for designing backbone sequence models in neural architectures. There are, however, minor decisions that can distinguish models; i.e., data-dependent or independent parameters, scalar or channel-wise learning rate/retaining gate, etc. Next, we discuss the overview of how existing architectures fit into MIRAS framework.", + "bbox": [ + 111, + 672, + 919, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "RNNs with Hebbian Rule. The first generation of modern recurrent architectures (e.g., Linear attention (Katharopoulos et al. 2020), RetNet (Sun et al. 2023), Mamba (Gu et al. 2024), and GLA (Yang et al. 2024b)) are based on Hebbian-like (e.g., gated Hebbian) learning rule (Hebb 2005). We let attentional bias be the dot product similarity. That is, given a memory $\\mathcal{M} \\in \\mathbb{R}^{d \\times n}$ and $\\mathbf{k}, \\mathbf{v} \\in \\mathbb{R}^d$ , we define $\\tilde{\\ell}_t \\coloneqq -2\\langle \\mathcal{M}_t \\mathbf{k}_t, \\mathbf{v}_t \\rangle$ and local retention as $\\mathrm{Ret}_t(\\mathcal{M}, \\mathcal{M}_{t-1}) = \\| \\mathcal{M}_t - \\alpha \\mathcal{M}_{t-1} \\|_F^2$ . Using Equation Learning-Retaining Viewpoint and gradient descent as the optimizer (i.e., memory learning algorithm), the memory update rule is:", + "bbox": [ + 111, + 747, + 919, + 838 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {M} _ {t} = \\alpha \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 844, + 919, + 862 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "When (1) $\\alpha = 1$ , memory update is equivalent to Linear Attention (LA) (Katharopoulos et al. 
2020); (2) $\\alpha \\in \\mathbb{R}$ is a learnable parameter, the resulting architecture is either Lightning Attention ( $n > 1$ ) (Li et al. 2025) or RetNet ( $n = 1$ ) (Sun et al. 2023); and (3) $\\alpha_{t} \\in \\mathbb{R}$ are data-dependent learnable parameters, the resulting sequence model is Mamba2 (Dao et al. 2024).
2024a) ( $m = 1$ ), or RWKV7 (Peng et al. 2025b) ( $m = d$ ). Therefore, RNNs with delta rule are special instances of MIRAS.", + "bbox": [ + 109, + 223, + 919, + 267 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Beyond Delta Rule. As discussed earlier, while delta rule with its value replacement strategy is more powerful than Hebbian-like learning rules, it suffers from theoretical limitations (Irie et al. 2023) and achieves moderate performance in practice (Yang et al. 2024c). Therefore, several studies have focused on update rules beyond delta rule. Recently, Titans (Behrouz et al. 2024c) suggests using non-linear MSE objective of $\\| \\mathcal{M}_t(\\mathbf{k}_t) - \\mathbf{v}_t\\| _2^2$ with both local and global retention of $\\mathrm{D}_t = \\| W_t - W_{t - 1}\\| _F^2$ and $\\mathrm{G}_t = \\| W_t\\| _2^2$ and optimize it with gradient descent with momentum $^2$ . Therefore, Titans-LMM is a special instance of MIRAs, where we use the abovementioned attentional bias and retention regularizations, and gradient descent with momentum as the optimizer.", + "bbox": [ + 109, + 281, + 921, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another example of such models is Mesa-layer, in which the model uses $\\sum_{i=1}^{t} \\|\\mathcal{M}_{t}(\\mathbf{k}_{i}) - \\mathbf{v}_{i}\\|_{2}^{2}$ as the attentional bias objective with $\\|\\mathcal{M}_{t}\\|_{2}^{2}$ as the retention regularization. Since these models use Newton's method to optimize such an objective, they provide a more expressive update rule than delta rule. We further discuss a set of new learning algorithms beyond delta rule in Section 5.", + "bbox": [ + 109, + 395, + 919, + 455 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Attention. As discussed by Sun et al. (2024), softmax attention is a non-parametric solution of $\\ell_2$ -MSE loss function (i.e., $\\| W\\mathbf{k} - \\mathbf{v}\\| _2^2$ ) with Nadaraya-Watson estimator. 
Therefore, softmax attention is an instance of MIRAS, when we find the non-parametric solution to the MSE loss with Nadaraya-Watson estimator, without retention.", + "bbox": [ + 111, + 469, + 919, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Beyond Existing Attentional Biases and Retention Gates", + "text_level": 1, + "bbox": [ + 111, + 537, + 750, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As discussed in the previous section, existing work focuses only on linear/quadratic choices for the attentional bias or retention gate. In particular, the loss function $L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t)$ is defined as $L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t) = c_t\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\|^2$ for some (learnable) constant $c_{t}$ in prior work. Also the regularization term $R_{t}(W)$ or the parametric $D_{t}$ is considered as a quadratic/linear function. In addition, almost all prior work considers $W$ to be the entire $\\mathbb{R}^d$ space. However, in general there could be various choices for all the three aforementioned design choices. To illustrate the potential and flexibility of our designed framework, here, we review some of the potential design choices for attentional bias and retention gate in MirAS. For the sake of clarity, we discuss all these attentional bias and memory retention gates based on using gradient descent as the optimizer, and so based on the provided two view points. However, these attentional bias objectives and retention regularizers can be directly used in Equation 4 and optimized by using any other optimization algorithms, resulting in different update rules.", + "bbox": [ + 109, + 569, + 919, + 720 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 Alternative Attentional Biases", + "text_level": 1, + "bbox": [ + 112, + 738, + 429, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Variant 1: $\\ell_p$ -Attentional Bias. 
As discussed in the main body, attentional bias defines the \"similarity metric\" and measures how well memory can recall the value, given its corresponding key. Although $\\ell_2$ regression loss often is a natural choice, it is sensitive to noise in the data. A natural extension is to use $\\ell_p$ -norm class of objectives. That is, let $\\mathcal{M}$ be the memory, $\\mathbf{k}$ be the keys, and $\\mathbf{v}$ be the values, we define $\\ell_p$ -attentional bias as:", + "bbox": [ + 109, + 768, + 919, + 829 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\| \\mathcal {M} \\left(\\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {p} ^ {p}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 840, + 919, + 859 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "The retention gate (forget gate) in Titans is different from Mamba2 and Gated DeltaNet that we discussed above. The main difference comes from the case of full memory erase. While Mamba2 gating removes the entire memory and treats the next token as the first ever seen data, Titans use a \"cold start\" strategy and use the previous state of the memory to measure the surprise of the incoming token before fully erasing the memory.", + "bbox": [ + 111, + 867, + 919, + 905 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 511, + 936, + 521, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $p \\in \\mathbb{R}^{\\geq 1}$ and $\\| . \\|_p$ is the $p$ -norm. Although depending on the distribution of the data, we might want to use different values of $p$ (see Section 6), different values of $p$ can result in memory architectures with interesting properties. 
For the sake of simplicity, let memory be a matrix, i.e., $W \\in \\mathbb{R}^{m \\times d}$ and $\\mathcal{M}(W, \\mathbf{k}_t) = W\\mathbf{k}_t$ , the closed form can be derived as:", + "bbox": [ + 109, + 92, + 919, + 138 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = W _ {t} - p \\eta_ {t} \\left(\\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 148, + 919, + 167 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Let $p = 1$ , the recurrence is simplified as:", + "bbox": [ + 112, + 178, + 388, + 193 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t} - \\eta_ {t} \\operatorname {S i g n} \\left(W _ {t} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 204, + 919, + 220 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "which means that the memory has only two values of $-1$ and $1$ . We call this variation value-less associative memory, in which we store entities (keys) but map them into two extreme class of $-1$ and $+1$ .", + "bbox": [ + 109, + 233, + 918, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Remark 5. One of the critical challenges to use the above update rule is in the backpropagation process, in which $\\operatorname{Sign}(\\cdot)$ and $|\\cdot|$ are non-differentiable and so might cause unstable training. 
To overcome this issue, we use $\\operatorname{Sign}(x) \\approx \\tanh(\\alpha x)$ , and $|x| \\approx \\sqrt{x^2 + \\epsilon}$ , as the smooth approximators of these functions.
2009; Huber 1992) as the attentional bias, thereby reducing the negative impact of the outlier data on the memory learning process.", + "bbox": [ + 109, + 383, + 919, + 460 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We can apply Huber-type loss in three different ways: The first approach is to define the summation of the Huber loss across different coordinates as the total loss, i.e.,", + "bbox": [ + 109, + 467, + 918, + 497 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\sum_ {j} \\mathcal {H} (\\mathcal {M} (W, \\mathbf {k} _ {t}) _ {j} - \\mathbf {v} _ {t, j}),\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 505, + 651, + 539 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\mathcal{M}(W,\\mathbf{k}_t)_j$ and $\\mathbf{v}_{t,j}$ denote the $j$ -th coordinate of $\\mathcal{M}(W,\\mathbf{k}_t)$ and $\\mathbf{v}_t$ respectively. The function $\\mathcal{H}(\\cdot):\\mathbb{R}\\mapsto \\mathbb{R}$ is the Huber loss defined as", + "bbox": [ + 109, + 550, + 919, + 578 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {H} (a) = \\left\\{ \\begin{array}{l l} \\frac {1}{2} a ^ {2} & \\text {i f} | a | \\leq \\delta \\\\ \\delta \\left(| a | - \\frac {1}{2} \\delta\\right) & \\text {i f} | a | > \\delta . \\end{array} \\right. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 577, + 919, + 609 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Utilizing this attentional bias can lead to various memory update rules. 
For example, for the matrix form memory $\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t$ , the update rule is given by", + "bbox": [ + 112, + 614, + 919, + 646 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\left[ \\left(\\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| \\leq \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) + \\left(\\delta_ {t} \\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} ^ {\\top}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| > \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) \\right] \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 655, + 919, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this formulation, the parameter $\\delta_t$ decides the type of the memory used for each block of memory ( $\\ell_2$ -norm objective or value-less) based on the context, making the memory more robust to outliers.", + "bbox": [ + 109, + 691, + 919, + 722 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The second approach is to define the Huber-type loss based on the $\\ell_2$ loss over all coordinates, i.e.,", + "bbox": [ + 111, + 729, + 769, + 744 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\mathcal {H} (\\| \\mathcal {M} (W, \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}).\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 757, + 643, + 773 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For simplicity of derivations, assume matrix memory $M(W,\\mathbf{k}_t) = W\\mathbf{k}_t$ . 
Then using gradient descent for updating memory leads the memory update rule", + "bbox": [ + 109, + 785, + 919, + 816 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\left\\{ \\begin{array}{l l} \\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T} & \\text {i f} \\| \\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} \\leq \\delta_ {t}, \\\\ \\delta_ {t} \\frac {\\left(\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right)}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {T} & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 825, + 919, + 867 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Again, in the form (15), the parameter $\\delta_t$ decides the type of the memory used ( $\\ell_2$ -norm objective or normalized version) based on the context, making the memory more robust to outliers.", + "bbox": [ + 109, + 877, + 919, + 907 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 511, + 936, + 521, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Finally, in the third approach, we present a smooth mixture method, in which the memory decides if for an incoming data it is better to use $\\ell_2$ or $\\ell_1$ attentional bias:", + "bbox": [ + 111, + 92, + 919, + 122 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf 
{v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 130, + 919, + 170 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The role of parameter $\\delta_t$ is the same as above.", + "bbox": [ + 112, + 178, + 421, + 193 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Variant 3: Memory Robust to Value Shifts. Following the robustness requirement discussed in the previous section, we aim to design a memory mechanism that exhibits resilience against small shifts in the value parameter. A natural approach in this context is to employ a robust optimization formulation. Specifically, we define the loss function as the worst-case $\\ell_2$ distance between the predicted memory output and the perturbed true value:", + "bbox": [ + 111, + 205, + 919, + 267 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\max _ {\\| \\delta \\mathbf {v} _ {t} \\| _ {2} \\leq \\Delta} \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\left(\\mathbf {v} _ {t} + \\boldsymbol {\\delta} \\mathbf {v} _ {t}\\right) \\| _ {2} ^ {2}. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 275, + 919, + 306 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This formulation seeks the memory parameters $W$ that perform well even under the adverse local perturbation of the true value $\\mathbf{v}_t$ within an $\\ell_2$ ball of radius $\\Delta$ . To solve the maximization problem in (17), we find the optimal perturbation $\\delta \\mathbf{v}_t^*$ . 
By solving this problem with respect to $\\delta \\mathbf{v}_t$ , we arrive at:", + "bbox": [ + 111, + 313, + 919, + 358 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\delta \\mathbf {v} _ {t} ^ {*} = \\Delta \\frac {- \\mathcal {M} (W , \\mathbf {k} _ {t}) + \\mathbf {v} _ {t}}{\\| \\mathcal {M} (W , \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 364, + 607, + 398 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Substituting this optimal perturbation back into the loss function (17), we obtain the robust loss:", + "bbox": [ + 111, + 405, + 756, + 420 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} ^ {2} + \\Delta \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} + \\frac {1}{2} \\Delta^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 428, + 750, + 455 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This robust loss function is a combination of the standard $\\ell_2$ loss and a term proportional to the $\\ell_2$ norm of the error, scaled by the robustness parameter $\\Delta$ . The value of $\\Delta$ thus controls the trade-off between fitting the nominal data and ensuring robustness against value perturbations.", + "bbox": [ + 111, + 463, + 919, + 508 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For simplicity of the derivations, let us consider a constant value for $\\Delta$ , an Euclidean retention gate $\\mathrm{Ret}_t(W,W_{t - 1}) = \\| W - W_{t - 1}\\|^2$ , and an attentional bias term $\\widetilde{\\ell} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle$ . 
Furthermore, to simplify the memory operation, we assume a linear matrix memory model $\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t$ . Under these assumptions, we can derive the memory update mechanism using gradient descent on the robust loss:", + "bbox": [ + 112, + 516, + 919, + 575 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} - \\eta \\left(\\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} + \\Delta \\frac {\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {\\top}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 594, + 740, + 628 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this update rule, the parameter $\\Delta$ , which governs the influence of the robustness term, can also be treated as a learnable parameter, allowing the model to adapt its robustness based on the observed data.", + "bbox": [ + 111, + 631, + 918, + 662 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2 Alternative Retention Gates", + "text_level": 1, + "bbox": [ + 112, + 679, + 408, + 695 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Variant 1: Memorization Over A Scaled Probability Simplex Via $f$ -Divergence. A common technique in learning to prevent numerical instabilities and exploding values is to restrict the search space to a bounded domain. Following this principle, to avoid numerical instabilities, we can constrained the variable $W_{t}$ to lie within a (scaled) probability simplex. 
In other words, we can restrict the state to lie in the constraint set", + "bbox": [ + 111, + 710, + 919, + 768 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {W} = \\{W \\mid \\| W \\| _ {1} = c \\text {a n d} W _ {j l} \\geq 0, \\forall j, l \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 779, + 656, + 796 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this set, each matrix $W$ can be viewed as a measure. Thus, in (Learning-Retaining Viewpoint), we can utilize divergences over measures to define our premetric. For example, we can use $f$ -divergence measure (Polyanskiy et al. 2025, Def 4.9), (Csiszar 1967) to define $\\mathrm{D}_t(\\cdot, \\cdot)$ . More specifically, let $f(\\cdot)$ be a smooth strictly convex function from $\\mathbb{R}^+$ to $\\mathbb{R}$ with $f(1) = 0$ . Then, we can define the $f$ -divergence between $W$ and $W'$ as", + "bbox": [ + 111, + 805, + 921, + 866 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D} _ {t} (W, W ^ {\\prime}) = \\sum_ {j l} W _ {j l} ^ {\\prime} f \\left(\\frac {W _ {j l}}{W _ {j l} ^ {\\prime}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 873, + 617, + 914 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "It is known that $f$ -divergence is zero if and only if $W = W'$ ; see Polyanskiy et al. 2025, Theorem 2.3. 
Using the above premetric as the retention gate and setting $\\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle$ in (Learning-Retaining Viewpoint), we get the update rule", + "bbox": [ + 109, + 92, + 919, + 136 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = W _ {t - 1} \\odot g \\left(- \\zeta_ {t} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right). \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 138, + 919, + 152 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Here $g(\\cdot)$ is the inverse of the mapping $f'$ , i.e., $g(f'(\\tau)) = \\tau$ , $\\forall \\tau$ ; the operator $\\odot$ denotes the Hadamard (elementwise) product, and $\\zeta_t$ should be chosen such that $\\| W_t\\|_1 = c$ . Notice that since the function $f(\\cdot)$ is strictly convex and smooth, its derivative is strictly increasing and hence $g(\\cdot)$ is well defined. Conversely, for any strictly monotone function $g(\\cdot)$ , we can find its inverse function $g^{-1}$ (which is strictly increasing) and define $f(\\tau) = \\mathrm{const} + \\int_{\\tau' = 0}^{\\infty}g^{-1}(\\tau')d\\tau'$ . The term const should be chosen such that $f(1) = 0$ . Then the update rule in (18) can be interpreted by the $f$ -divergence regularization, as explained above. Therefore, one can directly choose a continuous monotonically increasing function $g(\\cdot)$ and use (18) for memory update.", + "bbox": [ + 109, + 160, + 919, + 266 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Specializing to KL divergence. Let us further make the above update rule explicit by using special function $f$ . If we choose $f(\\tau) = \\tau \\ln(\\tau)$ , then the $f$ -divergence becomes the widely used KL divergence measure $D_t(W, W_{t-1}) = \\sum_{jl} W_{jl} \\log \\left( \\frac{W_{jl}}{(W_t)_{jl}} \\right)$ . 
In addition, we can also utilize the Shannon entropy as the global retention by regularizing deviations from uniform distribution, i.e., $G_t(W) = \\sum_{jl} W_{jl} \\log (W_{jl})$ . Combining these choices of the local and global retention gates, we obtain the overall retention gate", + "bbox": [ + 109, + 280, + 919, + 362 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e t} _ {t} (W, W _ {t - 1}) = \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t}\\right) _ {j l}}\\right) + \\frac {1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 362, + 723, + 400 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Choosing the attentional bias $\\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle$ and the above retention gate will lead to the update rule", + "bbox": [ + 109, + 407, + 918, + 439 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) \\right\\rangle + \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t}\\right) _ {j l}}\\right) + \\frac {1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right) \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 449, + 919, + 487 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s . 
t .} \\quad \\sum_ {j l} W _ {j l} = c, W _ {j l} \\geq 0, \\forall j l \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 489, + 919, + 523 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Attaching the Lagrange multiplier to the first constraint, the KKT conditions imply", + "bbox": [ + 112, + 535, + 679, + 550 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) _ {j l} + \\left(\\frac {1}{\\eta_ {t}} + \\frac {1}{\\alpha_ {t}}\\right) \\left(1 + \\log W _ {j l}\\right) - \\frac {1}{\\eta_ {t}} \\log \\left(\\left(W _ {t - 1}\\right) _ {j l}\\right) + \\mu_ {t} = 0, \\quad \\forall j, l\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 561, + 785, + 595 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\mu_t$ should be chosen such that $\\sum_{jl} W_{jl} = c$ . Rearranging the terms and defining $\\lambda_t = \\frac{1 / \\alpha_t}{1 / \\alpha_t + 1 / \\eta_t}$ , $\\eta_t' = \\frac{1}{1 / \\alpha_t + 1 / \\eta_t}$ , we get the update rule", + "bbox": [ + 109, + 607, + 918, + 638 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} \\leftarrow c \\operatorname {S o f t m a x} \\left(\\left(1 - \\lambda_ {t}\\right) \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} ^ {\\prime} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 640, + 919, + 656 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\lambda_t \\in (0,1)$ and $\\eta' \\in \\mathbb{R}^+$ are the parameters that can be learned during training. 
The Softmax operator ensures that the output lies in the set $\\mathcal{W}$ .", + "bbox": [ + 109, + 662, + 919, + 693 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Notice that while all above calculations are done for a matrix $W$ , similar update rule holds for other forms of parameters such as when $W$ is a neural network (or when the parameter $W$ is normalized per slice).", + "bbox": [ + 109, + 700, + 918, + 731 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Variant 2: Elastic Net Regularization: Hard and Soft Forgetting. Elastic net is a powerful and popular tool in regression analysis to balance the feature selection capabilities of LASSO (Tibshirani 1996) and bias reduction properties of Ridge regression (Hilt et al. 1977; Hoerl et al. 1970). It has been widely used in different applications due to its ability to handle high-dimensional data and mitigate the effects of multicollinearity. Given this success, a natural question is what happens if we use this regularization scheme in our context.", + "bbox": [ + 109, + 744, + 919, + 820 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Let us start based on (Learning-Retaining Viewpoint) to design our memorization scheme. In (Learning-Retaining Viewpoint), we discussed that the loss function $\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)$ is an approximation of the original function $\\ell (\\cdot)$ , measuring our goodness-of-fit. 
Regularizing this loss with elastic net regularizer, we obtain the approximation", + "bbox": [ + 109, + 828, + 931, + 873 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {\\ell} _ {t} (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\rangle .\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 883, + 665, + 901 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "with a global retention of $\\mathrm{G}_t(W) = \\frac{1}{2\\beta} \\| W\\| _2^2 +\\frac{1}{\\alpha}\\| W\\| _1$ . To fully specify the update rule of (Learning-Retaining Viewpoint), we also need to specify the premetric functions $\\mathrm{D}_t(\\cdot ,\\cdot)$ . For the sake of keeping the update rule simple (and parallelizable), we can choose", + "bbox": [ + 109, + 90, + 919, + 136 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D} _ {t} (W, W _ {t - 1}) = \\frac {1}{2} \\| W - W _ {t - 1} \\| _ {2} ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 135, + 620, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "These choices of the attentional bias and retention gate leads to the following update rule:", + "bbox": [ + 112, + 167, + 715, + 183 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\mathcal {S} _ {Y} \\left(\\lambda W _ {t - 1} - \\zeta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right), \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 205, + 919, + 220 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $\\gamma = \\frac{\\eta\\beta}{\\alpha(\\eta + \\beta)}$ , $\\lambda = \\frac{\\beta}{\\beta + \\eta}$ , $\\zeta = \\eta\\lambda$ , and $S_{\\gamma}$ is the soft thresholding operator, applied 
element-wise. For each element, this operator is defined as", + "bbox": [ + 109, + 227, + 918, + 261 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {\\gamma} (z) = \\operatorname {s i g n} (z) \\max \\left\\{0, | z | - \\gamma \\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 262, + 625, + 279 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In other words, for large values of $z$ , $S_{\\gamma}(z)$ makes $z$ closer to zero by $\\gamma$ amount. If it is already in the $\\gamma$ -vicinity of zero, then it makes it zero (hard forget).", + "bbox": [ + 109, + 284, + 919, + 316 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Equation (22) can be viewed as a combination of soft forgetting (obtained by multiplying $W$ by $\\lambda \\in (0,1)$ , and a hard forgetting (if it is smaller than $\\gamma$ ). The hyperparameters $\\gamma, \\lambda,$ and $\\zeta$ can be learned. Notice that since the shrinkage operator is not differentiable, we can approximate it with its smooth approximation. For example, we can use $S_{\\gamma}(z) \\approx \\frac{|z|*\\arctan(z / \\gamma)}{\\pi / 2}$ .", + "bbox": [ + 112, + 321, + 919, + 390 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Variant 3: Elastic Net Regularization: Forgetting via Soft-thresholding. The elastic net regularizer can also be used in the (FTRL Viewpoint). In particular, in (FTRL Viewpoint), we can set", + "bbox": [ + 109, + 401, + 919, + 431 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{\\eta_ {t}} R _ {t} (W) = \\frac {1}{\\eta} \\| W \\| ^ {2} + \\frac {1}{\\alpha} \\| W \\| _ {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 441, + 616, + 473 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "and use $\\widehat{\\ell}(W; x_i) = \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; x_i) \\rangle$ . 
Assuming initialization at $W_0 = 0$ , these choices of attentional bias and retention gate leads to the update rules:", + "bbox": [ + 109, + 484, + 919, + 516 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nA _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 527, + 614, + 542 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\mathcal {S} _ {\\eta / \\alpha} (A _ {t}) \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 546, + 916, + 564 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Here $S_{\\eta /\\alpha}(\\cdot)$ is the soft-thresholding operator with parameter $\\eta /\\alpha$ , which can be smoothly as explained in Variant 1.1.", + "bbox": [ + 111, + 574, + 870, + 590 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Variant 4: General $L_{q}$ Memory Stability. Existing work is based on the retention gate choices $\\mathrm{D}_t(W, W_{t-1}) = \\|W - W_{t-1}\\|_F^2$ or $R(W) = \\|W\\|_2^2$ . However, one can choose other choices of retention gate. For example, in (FTRL Viewpoint), we can choose $L_{q}$ norm as the regularizer $R(W)$ . 
More specifically, for $1 < q \\leq 2$ , we can set", + "bbox": [ + 109, + 602, + 918, + 650 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{\\eta_ {t}} R (W) = \\frac {1}{2 \\eta (q - 1)} \\| W \\| _ {q} ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 659, + 607, + 691 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Using this retention gate and choosing $\\widehat{\\ell_i} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{i - 1},\\nabla \\ell (W_{i - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle$ in (FTRL Viewpoint), leads to the update rule $W_{t} = -\\eta \\frac{A_{t}}{\\|A_{t}\\|_{p}^{p - 2}}$ , where $p = \\frac{q}{q - 1}$ and $A_{t} = \\sum_{i = 1}^{t}\\nabla \\ell (W_{i - 1};\\mathbf{k}_{t},\\mathbf{v}_{t})$ ; see Shalev-Shwartz et al. 2012, Section 2.6. Here, $\\odot$ denotes the Hadamard (element-wise) product and $|\\cdot |$ is the element-wise absolute value operator. Assuming $W_0 = 0$ , this update rule can be recursively written as:", + "bbox": [ + 109, + 702, + 919, + 771 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nA _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {p} ^ {p - 2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 780, + 697, + 816 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Variant 5: Bregman Divergence as Retention Gate.. Another natural choice is to use Bregman divergence as retention gate, leading to a mirror descent-type algorithms. In particular, given a smooth strictly convex function $f(\\cdot): \\mathbb{R} \\mapsto \\mathbb{R}$ , we can define the function $F(W) = \\sum_{jl} f(W_{jl})$ . 
Based on this choice of function $F$ , we define the Bregman divergence", + "bbox": [ + 109, + 840, + 919, + 886 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nD _ {t} (W, W ^ {\\prime}) = F (W) - F \\left(W ^ {\\prime}\\right) - \\langle W ^ {\\prime}, W - W ^ {\\prime} \\rangle\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 897, + 666, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 508, + 936, + 524, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "as our parametric function. Utilizing this retention gate and choosing $\\widetilde{\\ell}_t(W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle$ in (Learning-Retaining Viewpoint), we obtain the update rule", + "bbox": [ + 109, + 90, + 919, + 122 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = g \\left(- \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) + F ^ {\\prime} \\left(W _ {t - 1}\\right)\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 132, + 651, + 148 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Here, $F'$ is the mapping obtained by applying $f'(\\cdot)$ (the derivative of $f$ ) element-wise to all entries of its input matrix argument. The function $g$ is the inverse of the mapping $F'(\\cdot)$ , i.e., $g(F'(W)) = W$ .", + "bbox": [ + 109, + 157, + 919, + 189 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "If we choose $f(\\tau) = \\frac{\\tau^2}{2}$ , then $F'(W)$ becomes the identity mapping and so is $g$ . Therefore, the above update becomes simple gradient descent with no nonlinearity involved in the update rule. However, other choices of $f(\\cdot)$ introduces additional nonlinearity in $g(\\cdot)$ , which can enhance the expressivity of our memory. 
For example, we can choose the function $f(\\cdot)$ so that its derivative becomes the inverse sigmoid function, i.e., $f'(\\tau) = \\ln \\left( \\frac{\\tau}{1 - \\tau} \\right)$ with $f': (0,1) \\mapsto \\mathbb{R}$ . Since $f'(\\cdot)$ is strictly increasing, then the function $f(\\cdot)$ (and hence $F(\\cdot)$ ) is strictly convex. Therefore, the Bregman divergence is well defined. Moreover, the inverse of the function $f'(\\cdot)$ becomes the sigmoid function, i.e., $g(\\tau) = \\sigma(\\tau) = \\frac{\\exp(\\tau)}{1 + \\exp(\\tau)}$ with $g: \\mathbb{R} \\mapsto (0,1)$ . Then, the update of the memory becomes", + "bbox": [ + 109, + 196, + 921, + 306 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\sigma \\left(\\ln \\left(\\frac {W _ {t}}{1 - W _ {t}}\\right) - \\eta \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t})\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 316, + 658, + 351 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $\\sigma$ is the sigmoid function operated element-wise on the entries of $W$ , and the division operator $\\frac{W_t}{1 - W_t}$ is also performed element-wise. This update rule guarantees that the elements of $W_t$ remain within the interval $(0, 1)$ .", + "bbox": [ + 109, + 359, + 919, + 391 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.3 MIRAs's Variants: MONETA, YAAD, and MEMORA", + "text_level": 1, + "bbox": [ + 112, + 409, + 578, + 426 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In the previous section we discussed different potential choices for attentional bias and retention gate to show the generality and the potential of MIRAs. 
In this section, building upon our framework, we present three novel sequence models, each of which designed based on a different motivation, and discuss how they can leverage fast parallel training.", + "bbox": [ + 109, + 434, + 919, + 481 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "MOnETA. Given $p,q\\in \\mathbb{R}^{\\geq 1}$ , we design $(p,q)$ -MONETA as the variant of MIRAs as follows: (1) For the choice of memory architecture, we use an MLP with 2 layers with expansion factor of 4 and GELU activation function (Hendrycks et al. 2016). We also use residual connections and layer norm, resulting in $\\mathcal{M}(x) = x + \\mathsf{LN}(W_1\\sigma (W_2x))$ . (2) We choose $\\ell_p$ -attentional bias (introduced in Equation 11) for MONETA. (3) For the choice of retention gate, we use the hybrid of $\\ell_q$ retention gate $\\frac{1}{2(q - 1)}\\| W\\| _q^2$ (see Section 5.2 for details) and the standard $\\ell_2$ regularization $\\frac{1}{\\beta}\\| W\\| _2^2$ . (4) Finally, we use gradient descent as the memory learning algorithm. The above choices, result in the following recurrent formula for the memory module:", + "bbox": [ + 109, + 492, + 919, + 585 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nA _ {t} = \\alpha_ {t} A _ {t - 1} - \\eta_ {t} \\nabla \\ell_ {p} \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {q} ^ {q - 2}}. 
\\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 593, + 919, + 630 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Notably the gradient can be calculated using:", + "bbox": [ + 112, + 638, + 419, + 652 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\eta_ {t} \\left(\\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 662, + 919, + 680 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We use $(p,q) = (3,4)$ .", + "bbox": [ + 112, + 689, + 261, + 705 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "YAAD. Building upon our discussion on the importance of robust memory that protects itself from extreme events (tokens), we design YAAD based on Huber objective. That is, in MirAS, for the choice of memory structure, we follow MONETA and use an MLP with the same architecture as above; for the choice of attentional bias, we use Huber loss (defined in Equation 16); for the choice retention gate, for the sake of simplicity, we use a combination of local and global retention as $\\mathrm{Ret}_t(W,W_{t - 1}) = \\frac{1}{2\\theta_t}\\| W - W_{t - 1}\\| _F^2 +\\frac{1}{\\beta_t}\\| W\\| _2^2$ , which is equivalent to the \"forget gate\" mechanism introduced by Behrouz et al. (2024c); and finally, we simply use gradient descent as the memory learning algorithm. 
Given the above choices, we can write the resulted memory learning process as follows:", + "bbox": [ + 109, + 717, + 919, + 825 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\alpha_ {t} W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 834, + 919, + 875 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Note that for improving the expressive power, in all architectures, we decouple the learning rate $\\eta$ and the retention gate rate $\\alpha$ , resulting in an independent parameter $\\beta_{t} \\in [0,1]^{d}$ .", + "bbox": [ + 109, + 882, + 919, + 914 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg", + "image_caption": [ + "Figure 2: Visualization of the MirAs's variant architecture, their hybrid counterpart with SWA, and block design of MirAs layer." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 89, + 344, + 349 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 88, + 531, + 349 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 558, + 89, + 851, + 349 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "MEMORA. Finally, in MEMORA, we use the idea of elastic net regularization (i.e., hard and soft retention). To this end, in Miras: (1) For the choice of memory architecture, similar to above variants, we use an MLP (the same architecture as the previous variants). (2) For the choice of attentional bias, we use simple $\\ell_2$ regression loss. (3) For the choice of retention gate we use KL divergence as in Equation 21. (4) Finally, we optimize the memory using gradient descent, resulting in the following update rule:", + "bbox": [ + 109, + 422, + 919, + 500 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nW _ {t} = \\operatorname {S o f t m a x} \\left(\\alpha_ {t} \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 532, + 919, + 550 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.4 Architecture Backbone and Fast Training", + "text_level": 1, + "bbox": [ + 112, + 580, + 527, + 598 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Architectural Backbone. For the architectural backbone, we fully follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a): We replace attention modules with our variants of MIRAs in Llama's macro architecture with MLPs with SwiGLU(. 
) activation, rotary positional encodings (RoPE) (Su et al. 2024), and RMSNorm (Zhang et al. 2019). For MIRAs layer block, we follow the recent modern linear recurrent models (Behrouz et al. 2024c; Yang et al. 2024a), and incorporate a 1D depthwise-separable convolution layer (with kernel size of 4) after each of the query, key, and value projections. For the sake of training stability, we also use $\\ell_2$ normalization to $\\mathbf{q}$ and $\\mathbf{k}$ . The output of MIRAs layer block is normalized and gated with a linear layer (Mehta et al. 2023).", + "bbox": [ + 109, + 609, + 919, + 717 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Channel-wise Parameters. For learnable parameters of $\\eta_t, \\delta_t$ and the retention gate of $\\alpha_t$ we use channel-wise parametrization, i.e., $\\eta_t, \\delta_t, \\alpha_t \\in \\mathbb{R}^d$ . While gaining more expressive power, this parametrization results in significant parameter increase. To mitigate this issue, following Peng et al. (2025b), we use low-rank projections to project the input into $\\mathbb{R}^k$ and then to $\\mathbb{R}^d$ , where $k$ is a hyperparameter (usually 32 or 64). The backbone architecture is illustrated in Figure 2.", + "bbox": [ + 109, + 729, + 919, + 791 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Hybrid Models. We also evaluate the hybrid version of Miras's variants. For hybrid models, we follow the Samba (Ren et al. 2024) architecture, in which we sequentially combine our Miras layer with Sliding Window Attention (SWA). The illustration of hybrid model Figure 2.", + "bbox": [ + 109, + 804, + 918, + 849 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Parallelizable Training. While the design of Miras's variant are theoretically well-motivated, their recurrence is non-linear, potentially making their straightforward training slow for large scales. In this section, we build upon the work of Behrouz et al. (2024c) and Sun et al. 
(2024) to make the training parallelizable. The main idea is to divide the sequence into", + "bbox": [ + 109, + 862, + 919, + 909 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "chunks with size $b$ (usually is 16 or 64) and calculate the gradient for all tokens in the current chunk with respect to the last state of the memory in the previous chunk. That is, we use $\\nabla \\ell(\\mathcal{M}_{t'}; \\mathbf{k}_t, \\mathbf{v}_t)$ instead of $\\nabla \\ell(\\mathcal{M}_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t)$ , where $t'$ is the last state in the previous chunk.", + "bbox": [ + 109, + 92, + 919, + 137 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Given the above trick, we can calculate all gradients at once and make the recurrence inside each chunk linear. However, to fully take advantage of accelerators, we need to reformulate the process as matrix multiplication. For MONETA, for the sake of clarity, assume $q = 2$ . We follow the same algorithm as Behrouz et al. (2024c) and expand the recurrence as follows:", + "bbox": [ + 109, + 143, + 919, + 205 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {M} _ {t} = \\alpha_ {t} \\mathcal {M} _ {t - 1} - \\eta_ {t} \\nabla \\ell (\\mathcal {M} _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\\\ = \\beta_ {t} \\mathcal {M} _ {0} - \\sum_ {i = 1} ^ {t} \\eta_ {i} \\frac {\\beta_ {t}}{\\beta_ {i}} \\nabla \\ell \\left(\\mathcal {M} _ {t ^ {\\prime}}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right), \\tag {28} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 218, + 919, + 275 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $t' = t - \\mathrm{mod}(t, b)$ , and $\\beta_{i} = \\prod_{j=1}^{i} \\alpha_{j}$ . 
For the sake of clarity, we focus on the first chunk, i.e., $t = b$ and so $t' = 0$ , and explain the process for the case that $\\mathcal{M}_t = W_t$ is linear. The process for 2-layer MLPs and other chunks is similar. Using $\\ell_p$ loss function, we have:", + "bbox": [ + 109, + 287, + 919, + 333 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla \\ell \\left(W _ {0}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\left(\\operatorname {S i g n} \\left(W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot \\left| W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top} \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {0};; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = p \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot \\left(\\left| W _ {0} \\mathbf {K} - \\mathbf {V} \\right| ^ {p - 1}\\right) \\mathbf {K} ^ {\\top}, \\tag {29} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 344, + 919, + 404 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\mathbf{E}_b = \\left[\\eta_1\\quad \\eta_2\\quad \\dots \\quad \\eta_b\\right]$ and $\\mathbf{B}_b$ is defined analogously on $\\frac{\\beta_b}{\\beta_i}\\mathrm{s}$ . For the sake of stability in training, we use $\\operatorname{Sign}(x)\\approx \\tanh (\\alpha x)$ and $|x| = \\sqrt{x^2 + \\epsilon}$ , where $\\epsilon >0$ is a small number (i.e., $\\epsilon = 1e - 6$ ). As discussed in Equation 24, the case that $q\\neq 2$ appears as a normalization term on the memory. Similar to Titans (Behrouz et al. 2024c) and TTT (Sun et al. 
2024), we do not apply this non-linearity inside each chunk and instead use it at the end of each chunk.", + "bbox": [ + 109, + 417, + 919, + 484 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For YAAD, the process is very similar to the above. We calculate the gradient of both $\\ell_1$ and $\\ell_2$ loss and use a masking based on $\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\| \\leq \\delta_t$ .", + "bbox": [ + 109, + 491, + 918, + 523 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For MEMORA, the update rule has two non-linear parts, i.e., softmax and log, making the model hardly parallelizable. To this end, as discussed above, we use its linear version inside each chunk and its non-linear version across chunks. However, using both log and softmax at the end of each chunk removes the effect of log. To this end, we consider a lag tokens after each chunk (i.e., tokens with index $i = kb + 1$ , where $b$ is the chunk size and $k \\in \\mathbb{Z}^+$ ). That is, let $\\mathcal{M}_0$ be the last state of the memory in previous chunk, we have:", + "bbox": [ + 109, + 529, + 919, + 604 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {M} _ {1} = \\operatorname {S o f t m a x} \\left(\\alpha_ {1} \\log \\left(\\mathcal {M} _ {0}\\right) - \\eta_ {1} \\nabla \\ell_ {2} \\left(\\mathcal {M} _ {0}; \\mathbf {k} _ {1}, \\mathbf {v} _ {1}\\right)\\right), \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 351, + 617, + 919, + 632 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and then we use $\\mathcal{M}_1$ for the next chunk. 
Again, for the sake of clarity, assume that memory is linear, i.e., $\\mathcal{M}_1 = W_1$ :", + "bbox": [ + 109, + 643, + 870, + 659 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla \\ell \\left(W _ {1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = \\left(W _ {1} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} (31) \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {1};; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\left(W _ {1} \\mathbf {K} - \\mathbf {V}\\right) \\mathbf {K} ^ {\\top}, (32) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 670, + 919, + 729 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where matrices are defined the same as for Equation 29.", + "bbox": [ + 109, + 739, + 486, + 755 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6 Experiments", + "text_level": 1, + "bbox": [ + 112, + 777, + 290, + 797 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In our experimental evaluations, we aim to answer three main questions: (1) Does different attentional biases results in different architectures in practice? (2) How does different types of retention gates (i.e., retention gate) affect the performance of the model in long context? (3) How do MEMORA, MONETA, and YAAD perform in downstream tasks compare to baselines?", + "bbox": [ + 109, + 808, + 918, + 869 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Setup. We train our models with training context window of size 4096 using either FineWeb-Edu dataset (Penedo et al. 2024) (for LM and common-sense reasoning tasks) or C4 dataset (Raffel et al. 2020) (for scaling patterns). 
We use model", + "bbox": [ + 109, + 882, + 919, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg", + "image_caption": [ + "Figure 3: Scaling patterns when increasing (Left) model size, (Middle) sequence length (model size = 340M) (3) (Right) sequence length (model size = 760M) on C4 dataset." + ], + "image_footnote": [], + "bbox": [ + 117, + 90, + 377, + 215 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 388, + 90, + 645, + 215 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 90, + 916, + 215 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "sizes of 120M, 340M, 760M, and 1.3B parameters. We train small models (120M and 340M) on 15B tokens sampled from the dataset, the medium size model (760M) on 30B tokens, and the large model on 100B tokens. Baseline results are reported by Behrouz et al. (2024c).", + "bbox": [ + 109, + 285, + 919, + 330 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.1 Language Modeling and Common-sense Reasoning", + "text_level": 1, + "bbox": [ + 109, + 349, + 616, + 367 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a,c) and first focus on the perplexity in language modeling and also commonsense reasoning tasks. The results for MEMORA, YAAD, MONETA and also baselines with size of 340M, 760, and 1.3B are reported in Table 2. 
All of our variants outperform all the baselines
All three variants outperform all baselines given almost the same budget of FLOPs.
ModelWiki. ppl ↓LMB. ppl ↓LMB. acc ↑PIQA acc ↑Hella. acc_n ↑Wino. acc ↑ARC-e acc ↑ARC-c acc_n ↑SIQA acc ↑BoolQ acc ↑
340M params / 15B tokens
Transformer++31.5241.0830.7662.9834.7650.5345.2124.0536.8158.24
RetNet32.5049.7328.2462.6134.1550.9144.2723.6236.7959.72
GLA28.5143.0228.7364.0535.9650.0054.1924.2937.1358.39
Mamba30.8340.2129.9463.7935.8849.8249.2424.5635.4160.07
DeltaNet28.6547.3028.4363.5235.9549.6352.6825.3737.9658.79
TTT27.4434.1930.0663.9735.7150.0853.0126.1137.3259.83
Gated DeltaNet27.0130.9434.1163.0838.1251.6055.2826.7734.8959.54
MONETA (ours)26.1929.3135.7063.9939.2352.0455.9627.1537.2960.22
YAAD (ours)26.6129.1134.0964.9339.8651.1254.7528.6433.8260.29
MEMORA (ours)27.1630.4433.6865.2139.1751.2353.4027.9934.159.29
760M params / 30B tokens
Transformer++25.2127.6435.7866.9242.1951.9560.3832.4639.5160.37
RetNet26.0824.4534.5167.1941.6352.0963.1732.7838.3657.92
Mamba222.9428.3733.5467.9042.7149.7763.4831.0940.0658.15
DeltaNet24.3724.6037.0666.9341.9850.6564.8731.3939.8859.02
TTT24.1723.5134.7467.2543.9250.9964.5333.8140.1659.58
Gated DeltaNet21.1822.0935.5468.0144.9550.7366.8733.0939.2159.14
Samba*20.6322.7139.7269.1947.3552.0166.9233.2038.9861.24
Gated DeltaNet-H2*19.8820.8339.1868.9548.2252.5767.0135.4939.3961.11
MONETA (ours)21.1821.9438.0269.5549.1653.0167.4736.0940.5363.18
YAAD (ours)20.9921.5737.8569.1450.0253.9367.7836.2741.0163.34
MEMORA (ours)22.2822.3138.1967.8249.3053.2863.5736.1540.9462.96
MONETA-H (ours)18.7220.1340.5970.8450.1354.1767.6436.7940.8762.43
YAAD-H (ours)18.5919.8040.2269.5150.4853.6968.0436.5540.2861.94
MEMORA-H (ours)18.2420.5539.9169.0649.8452.8866.9036.1240.9961.75
1.3B params / 100B tokens
Transformer++18.5318.3242.6070.0250.2353.5168.8335.1040.6657.09
RetNet19.0817.2740.5270.0749.1654.1467.3433.7840.7860.39
Mamba216.5612.5645.6671.8755.6755.2472.4737.8840.2060.13
DeltaNet17.7116.8842.4670.7250.9353.3568.4735.6640.2255.29
Gated DeltaNet16.4212.1746.6572.2555.7657.4571.2138.3940.6360.24
Samba*16.1313.2944.9470.9453.4255.5668.8136.1739.9662.11
Gated DeltaNet-H2*15.9112.5548.7672.1956.8857.7771.3339.0741.9161.55
MONETA (ours)15.5211.4747.8873.1656.1459.0972.5340.3241.9161.18
YAAD (ours)15.1811.8947.2372.8156.4659.0272.1440.0540.7361.86
MEMORA (ours)15.9012.0448.6773.1055.9957.3671.5537.9240.1961.34
", + "bbox": [ + 156, + 155, + 875, + 760 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "6.4 Ablation Study", + "text_level": 1, + "bbox": [ + 112, + 784, + 294, + 801 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section we perform ablation studies to validate if different design choices that we discussed through the paper are positively contributing for achieving better results.", + "bbox": [ + 109, + 809, + 919, + 839 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The Effect of $p$ on Performance. We first evaluate the effect of $p$ on the performance of MONETA. We vary the value of $p \\in \\{1, 1.5, 2, 2.8, 3, 3.2, 4\\}$ and context window from 2K to 16K. The results are reported in Figure 4. Interestingly, there is no monotone pattern when increasing the value of $p$ and the best performance is achieved when $p = 3$ , while $p = 4$", + "bbox": [ + 109, + 852, + 921, + 898 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/9d623c722a5ab850992a05508faa29932404f7de735834c9391b80064d8ea420.jpg", + "table_caption": [ + "Table 3: Performance of MONETA, YAAD, MEMORA, and baselines on NIAH task from RULER benchmark. The best results with highest accuracy are highlighted." + ], + "table_footnote": [], + "table_body": "
ModelS-NIAH-PKS-NIAH-NS-NIAH-WAverage
2K4K8K2K4K8K1K2K4K
Mamba298.661.431.098.455.814.262.242.24.252.0
DeltaNet96.898.898.647.215.412.885.246.220.057.9
Gated DeltaNet89.891.490.099.291.826.486.482.624.475.8
TTT98.498.898.060.236.610.285.878.828.066.1
MONETA99.498.898.899.499.492.892.288.270.893.5
YaAD99.298.694.499.898.693.291.889.667.492.9
MEMORA99.298.892.698.499.293.292.488.270.492.1
", + "bbox": [ + 256, + 131, + 777, + 268 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "achieves the worst performance. Also, although different values of $p$ results in different memory modules with varied performance, the scaling pattern when increasing the context length is almost the same.", + "bbox": [ + 109, + 294, + 919, + 325 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Effect of $q$ on Performance. Similarly, we evaluate the effect of $q$ by varying it in $\\{2, 3, 4, 5\\}$ . Interestingly, contrary to $p$ , the value of $q$ can change the scaling pattern when increasing the context length. The main reason for this observation is that the value of $q$ determines the retention gate and a powerful retention gate can improve the memory management, resulting in better performance.", + "bbox": [ + 109, + 338, + 919, + 398 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Effect of Design. To evaluate the architectural design choices, we perform an ablation study on YAAD. The results are in Table 4. The first row, reports the performance of YAAD, while (1) the second row removes the retention (i.e., $\\beta = 1$ ), (2) third row makes $\\delta$ input independent, (3) the third row removes $\\ell_2$ -loss from the Huber loss, (4) the forth row removes the $\\ell_1$ condition, and (5) the last row replaces the MLP with a linear layer. These results indicate that all design choices are contributing to the performance of the model.", + "bbox": [ + 109, + 412, + 919, + 489 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg", + "image_caption": [ + "Figure 4: The effect of parameters $p$ and $q$ on the performance with different context length." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 518, + 349, + 630 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 362, + 516, + 596, + 630 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/f5b314e89c68e81cb2f087136de86e716ba11e0e457e1752e6ffa962a3bddd22.jpg", + "table_caption": [ + "Table 4: Ablation study on the components of YAAD." + ], + "table_footnote": [], + "table_body": "
ModelAvg. LM
YAAD53.98
- Retention Gate50.63
- Input-dependent δ52.19
l2-loss52.86
l1-loss53.04
linear memory51.57
", + "bbox": [ + 674, + 565, + 903, + 686 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 112, + 704, + 274, + 722 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this paper, we present MIRAS, a general framework that explains the connection of online optimization and test time memorization. MIRAS framework can explain the role of several standard architectural choices in the literature (e.g., forget gate) and helps design next generation of architectures that are capable of managing the memory better. Building upon our framework, we present three novel sequence models, each of which with its own (dis)advantages. Our experimental evaluations show that all these variants are more powerful than Transformers and linear RNNs, in various downstream tasks. In this work, we present a diverse set of variants using MIRAS. In future, exploring these alternative architectures for different downstream tasks is an interesting future direction.", + "bbox": [ + 109, + 736, + 919, + 842 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 89, + 235, + 106 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ali Behrouz, Parsa Delavari, and Farnoosh Hashemi. \"Unsupervised Representation Learning of Brain Activity via Bridging Voxel Activity and Functional Connectivity\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=n0jZfpLyh1.", + "[2] Ali Behrouz, Michele Santacatterina, and Ramin Zabih. \"Mambamixer: Efficient selective state space models with dual token and channel selection\". In: arXiv preprint arXiv:2403.19888 (2024).", + "[3] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. \"Titans: Learning to memorize at test time\". 
In: arXiv preprint arXiv:2501.00663 (2024).", + "[4] Alberto Bietti, Vivien Cabannes, Diane Bouchacourt, Herve Jegou, and Leon Bottou. \"Birth of a transformer: A memory viewpoint\". In: Advances in Neural Information Processing Systems 36 (2023), pp. 1560-1588.", + "[5] Yonatan Bisk, Rowan Zellers, Jianfeng Gao, Yejin Choi, et al. \"Piqa: Reasoning about physical commonsense in natural language\". In: Proceedings of the AAAI conference on artificial intelligence. Vol. 34. 2020, pp. 7432-7439.", + "[6] Leon Bottou and Vladimir Vapnik. \"Local learning algorithms\". In: Neural computation 4.6 (1992), pp. 888-900.", + "[7] Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. \"BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions\". In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Ed. by Jill Burstein, Christy Doran, and Thamar Solorio. Minneapolis, Minnesota: Association for Computational Linguistics, June 2019, pp. 2924-2936. DOI: 10.18653/v1/N19-1300. URL: https://aclanthology.org/N19-1300/.", + "[8] Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. \"Think you have solved question answering? try arc, the ai2 reasoning challenge\". In: arXiv preprint arXiv:1803.05457 (2018).", + "[9] Imre Csiszar. \"On information-type measure of difference of probability distributions and indirect observations\". In: Studia Sci. Math. Hungar. 2 (1967), pp. 299-318.", + "[10] Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. \"Recurrent Neural Networks Learn to Store and Generate Sequences using Non-Linear Representations\". In: Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP. 2024, pp. 
248-262.", + "[11] Karan Dalal, Daniel Koceja, Gashon Hussein, Jiarui Xu, Yue Zhao, Youjin Song, Shihao Han, Ka Chun Cheung, Jan Kautz, Carlos Guestrin, et al. \"One-Minute Video Generation with Test-Time Training\". In: arXiv preprint arXiv:2504.05298 (2025).", + "[12] Tri Dao and Albert Gu. \"Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality\". In: arXiv preprint arXiv:2405.21060 (2024).", + "[13] Soham De, Samuel L Smith, Anushan Fernando, Aleksandar Botev, George Cristian-Muraru, Albert Gu, Ruba Haroun, Leonard Berrada, Yutian Chen, Srivatsan Srinivasan, et al. \"Griffin: Mixing gated linear recurrences with local attention for efficient language models\". In: arXiv preprint arXiv:2402.19427 (2024).", + "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. \"An image is worth 16x16 words: Transformers for image recognition at scale\". In: arXiv preprint arXiv:2010.11929 (2020).", + "[15] Yossi Gandelsman, Yu Sun, Xinlei Chen, and Alexei Efros. \"Test-time training with masked autoencoders\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 29374-29385.", + "[16] Xavier Gonzalez, Andrew Warrington, Jimmy Smith, and Scott Linderman. \"Towards scalable and stable parallelization of nonlinear rnns\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 5817-5849.", + "[17] Riccardo Grazzi, Julien Siems, Jörg KH Franke, Arber Zela, Frank Hutter, and Massimiliano Pontil. \"Unlocking state-tracking in linear rnns through negative eigenvalues\". In: arXiv preprint arXiv:2411.12537 (2024).", + "[18] Klaus Greff, Rupesh K Srivastava, Jan Koutnk, Bas R Steunebrink, and Jürgen Schmidhuber. \"LSTM: A search space odyssey\". In: IEEE transactions on neural networks and learning systems 28.10 (2016), pp. 2222-2232.", + "[19] Albert Gu and Tri Dao. 
\"Mamba: Linear-Time Sequence Modeling with Selective State Spaces\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=tEYskw1VY2.", + "[20] Albert Gu, Karan Goel, and Christopher Re. \"Efficiently Modeling Long Sequences with Structured State Spaces\". In: International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=uYLFOz1v1AC." + ], + "bbox": [ + 114, + 114, + 921, + 869 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. \"Liquid Structural State-Space Models\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=g4OTKRKfS7R.", + "[22]Trevor Hastie, Robert Tibshirani, Jerome Friedman, et al. The elements of statistical learning. 2009.", + "[23] Elad Hazan et al. \"Introduction to online convex optimization\". In: Foundations and Trends® in Optimization 2.3-4 (2016), pp. 157-325.", + "[24] Donald Olding Hebb. The organization of behavior: A neuropsychological theory. Psychology press, 2005.", + "[25] Dan Hendrycks and Kevin Gimpel. \"Gaussian error linear units (gelus)\". In: arXiv preprint arXiv:1606.08415 (2016).", + "[26] Donald E Hilt and Donald W Seegrist. Ridge, a computer program for calculating ridge regression estimates. Vol. 236. Department of Agriculture, Forest Service, Northeastern Forest Experiment ..., 1977.", + "[27] Arthur E Hoerl and Robert W Kennard. \"Ridge regression: applications to nonorthogonal problems\". In: Technometrics 12.1 (1970), pp. 69-82.", + "[28] John J Hopfield. “Neural networks and physical systems with emergent collective computational abilities.” In: Proceedings of the national academy of sciences 79.8 (1982), pp. 
2554-2558.", + "[29] Cheng-Ping Hsieh, Simeng Sun, Samuel Kriman, Shantanu Acharya, Dima Rekesh, Fei Jia, and Boris Ginsburg. \"RULER: What's the Real Context Size of Your Long-Context Language Models?\" In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=kIoBbc76Sy.", + "[30] Jerry Yao-Chieh Hu, Dennis Wu, and Han Liu. \"Provably optimal memory capacity for modern hopfield models: Transformer-compatible dense associative memories as spherical codes\". In: arXiv preprint arXiv:2410.23126 (2024).", + "[31] Peter J Huber. \"Robust estimation of a location parameter\". In: Breakthroughs in statistics: Methodology and distribution. Springer, 1992, pp. 492-518.", + "[32] Kazuki Irie, Robert Csordas, and Jürgen Schmidhuber. \"Practical computational power of linear transformers and their recurrent and self-referential extensions\". In: arXiv preprint arXiv:2310.16076 (2023).", + "[33] Kazuki Irie, Imanol Schlag, Robert Csordas, and Jurgen Schmidhuber. \"Going beyond linear transformers with recurrent fast weight programmers\". In: Advances in neural information processing systems 34 (2021), pp. 7703-7717.", + "[34] Vidit Jain and Erik Learned-Miller. \"Online domain adaptation of a pre-trained cascade of classifiers\". In: CVPR 2011. IEEE. 2011, pp. 577-584.", + "[35] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. \"Scaling laws for neural language models\". In: arXiv preprint arXiv:2001.08361 (2020).", + "[36] M. Karami and V. Mirrokni. Lattice: Learning to Efficiently Compress the Memory. 2025.", + "[37] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. \"Transformers are rnns: Fast autoregressive transformers with linear attention\". In: International conference on machine learning. PMLR. 2020, pp. 5156-5165.", + "[38] Dmitry Krotov. \"Hierarchical associative memory\". 
In: arXiv preprint arXiv:2107.06446 (2021).", + "[39] Dmitry Krotov and John J Hopfield. “Dense associative memory for pattern recognition”. In: Advances in neural information processing systems 29 (2016).", + "[40] Aonian Li, Bangwei Gong, Bo Yang, Boji Shan, Chang Liu, Cheng Zhu, Chunhao Zhang, Congchao Guo, Da Chen, Dong Li, et al. \"Minimax-01: Scaling foundation models with lightning attention\". In: arXiv preprint arXiv:2501.08313 (2025).", + "[41] Chengxuan Li, Di Huang, Zeyu Lu, Yang Xiao, Qingqi Pei, and Lei Bai. “A survey on long video generation: Challenges, methods, and prospects”. In: arXiv preprint arXiv:2403.16407 (2024).", + "[42] Xiaoyu Li, Yuanpeng Li, Yingyu Liang, Zhenmei Shi, and Zhao Song. \"On the expressive power of modern hopfield networks\". In: arXiv preprint arXiv:2412.05562 (2024).", + "[43] Yi Heng Lim, Qi Zhu, Joshua Selfridge, and Muhammad Firmansyah Kasim. \"Parallelizing non-linear sequential models over the sequence length\". In: The Twelfth International Conference on Learning Representations. 2024. URL: https://openreview.net/forum?id=E34A1VLN0v.", + "[44] Bo Liu, Rui Wang, Lemeng Wu, Yihao Feng, Peter Stone, and Qiang Liu. \"Longhorn: State space models are amortized online learners\". In: arXiv preprint arXiv:2407.14207 (2024).", + "[45] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. \"Lost in the middle: How language models use long contexts\". In: Transactions of the Association for Computational Linguistics 12 (2024), pp. 157-173.", + "[46] Elizabeth F Loftus. \"The reality of repressed memories.\" In: American psychologist 48.5 (1993), p. 518." + ], + "bbox": [ + 114, + 90, + 921, + 893 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Carlo Lucibello and Marc Mézard. 
\"Exponential capacity of dense associative memories\". In: Physical Review Letters 132.7 (2024), p. 077301.", + "[48] Julien Mairal. \"Incremental majorization-minimization optimization with application to large-scale machine learning\". In: SIAM Journal on Optimization 25.2 (2015), pp. 829-855.", + "[49] Harsh Mehta, Ankit Gupta, Ashok Cutkosky, and Behnam Neyshabur. \"Long Range Language Modeling via Gated State Spaces\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=5MkYIYCbva.", + "[50] Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. “Pointer Sentinel Mixture Models”. In: International Conference on Learning Representations. 2017. URL: https://openreview.net/forum?id=Byj72udxe.", + "[51] William Merrill, Jackson Petty, and Ashish Sabharwal. \"The Illusion of State in State-Space Models\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=QZgo9JZpLq.", + "[52] Ravi Teja Mullapudi, Steven Chen, Keyi Zhang, Deva Ramanan, and Kayvon Fatahalian. \"Online model distillation for efficient video inference\". In: Proceedings of the IEEE/CVF International conference on computer vision. 2019, pp. 3573-3582.", + "[53] Tsendsuren Munkhdalai, Alessandro Sordoni, Tong Wang, and Adam Trischler. “Metalearned neural memory”. In: Advances in Neural Information Processing Systems 32 (2019).", + "[54] Tsendsuren Munkhdalai and Hong Yu. \"Neural semantic encoders\". In: Proceedings of the conference. Association for Computational Linguistics. Meeting. Vol. 1. NIH Public Access. 2017, p. 397.", + "[55] Daniel Neil, Jun Haeng Lee, Tobi Delbruck, and Shih-Chii Liu. \"Delta networks for optimized recurrent network computation\". In: International conference on machine learning. PMLR. 2017, pp. 2584-2593.", + "[56] Hideyuki Okano, Tomoo Hirano, and Evan Balaban. \"Learning and memory\". 
In: Proceedings of the National Academy of Sciences 97.23 (2000), pp. 12403-12404.", + "[57] Antonio Orvieto, Samuel L Smith, Albert Gu, Anushan Fernando, Caglar Gulcehre, Razvan Pascanu, and Soham De. \"Resurrecting recurrent neural networks for long sequences\". In: International Conference on Machine Learning. PMLR. 2023, pp. 26670-26698.", + "[58] Denis Paperno, German Kruszewski, Angeliki Lazaridou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernandez. \"The LAMBADA dataset: Word prediction requiring a broad discourse context\". In: Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Ed. by Katrin Erk and Noah A. Smith. Berlin, Germany: Association for Computational Linguistics, Aug. 2016, pp. 1525-1534. DOI: 10.18653/v1/P16-1144. URL: https://aclanthology.org/P16-1144/.", + "[59] Guilherme Penedo, Hynek Kydlcek, Loubna Ben allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro Von Werra, and Thomas Wolf. \"The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale\". In: The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2024. URL: https://openreview.net/forum?id=n6Sckn2QaG.", + "[60] Bo Peng, Eric Alcaide, Quentin Gregory Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Nguyen Chung, Leon Derczynski, Xingjian Du, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, Jiaju Lin, Krishna Sri Ipsit Mantri, Ferdinand Mom, Atsushi Saito, Guangyu Song, Xiangru Tang, Johan S. Wind, Stanisław Wozniak, Zhenyuan Zhang, Qinghua Zhou, Jian Zhu, and Rui-Jie Zhu. \"RWKV: Reinventing RNNs for the Transformer Era\". In: The 2023 Conference on Empirical Methods in Natural Language Processing. 2023. 
URL: https://openreview.net/forum?id=7SaXczaBpG.", + "[61] Bo Peng, Daniel Goldstein, Quentin Anthony, Alon Albalak, Eric Alcaide, Stella Biderman, Eugene Cheah, Xingjian Du, Teddy Ferdinan, Haowen Hou, et al. \"Eagle and finch: Rwkv with matrix-valued states and dynamic recurrence\". In: arXiv preprint arXiv:2404.05892 (2024).", + "[62] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Saiteja Utpala, et al. \"RWKV-7\" Goose\" with Expressive Dynamic State Evolution\". In: arXiv preprint arXiv:2503.14456 (2025).", + "[63] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Siateja Utpala, et al. \"Rwkv-7\" goose\" with expressive dynamic state evolution\". In: arXiv preprint arXiv:2503.14456 (2025).", + "[64] Yury Polyanskiy and Yihong Wu. Information theory: From coding to learning. Cambridge university press, 2025.", + "[65] DL Prados and SC Kak. \"Neural network capacity using delta rule\". In: *Electronics Letters* 25.3 (1989), pp. 197-199." + ], + "bbox": [ + 112, + 90, + 921, + 878 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 506, + 936, + 524, + 948 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[66] Zhen Qin, Songlin Yang, Weixuan Sun, Xuyang Shen, Dong Li, Weigao Sun, and Yiran Zhong. \"HGRN2: Gated Linear RNNs with State Expansion\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=y6SqBJfCSk.", + "[67] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. \"Exploring the limits of transfer learning with a unified text-to-text transformer\". In: Journal of machine learning research 21.140 (2020), pp. 
1-67.", + "[68] Hubert Ramsauer, Bernhard Schäfl, Johannes Lehner, Philipp Seidl, Michael Widrich, Lukas Gruber, Markus Holzleitner, Thomas Adler, David Kreil, Michael K Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. \"Hopfield Networks is All You Need\". In: International Conference on Learning Representations. 2021. URL: https://openreview.net/forum?id=tL89RnzIiCd.", + "[69] Meisam Razaviyayn, Mingyi Hong, and Zhi-Quan Luo. “A unified convergence analysis of block successive minimization methods for nonsmooth optimization”. In: SIAM Journal on Optimization 23.2 (2013), pp. 1126–1153.", + "[70] Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. \"Samba: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling\". In: arXiv preprint arXiv:2406.07522 (2024).", + "[71] Lee T Robertson. \"Memory and the brain\". In: Journal of dental education 66.1 (2002), pp. 30-42.", + "[72] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. \"Winogrande: An adversarial winograd schema challenge at scale\". In: Communications of the ACM 64.9 (2021), pp. 99-106.", + "[73] Imanol Schlag, Kazuki Irie, and Jürgen Schmidhuber. \"Linear transformers are secretly fast weight programmers\". In: International Conference on Machine Learning. PMLR. 2021, pp. 9355-9366.", + "[74] JH Schmidhuber. \"Learning to control fast-weight memories: An alternative to recurrent nets. Accepted for publication in\". In: Neural Computation (1992).", + "[75] Jürgen Schmidhuber. “Reducing the ratio between learning complexity and number of time varying variables in fully recurrent nets”. In: ICANN'93: Proceedings of the International Conference on Artificial Neural Networks Amsterdam, The Netherlands 13–16 September 1993 3. Springer. 1993, pp. 460–463.", + "[76] Jürgen Schmidhuber and Sepp Hochreiter. \"Long Short-term Memory\". 
In: Neural Computation MIT-Press (1997).", + "[77] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. \"Implicit Language Models are RNNs: Balancing Parallelization and Expressivity\". In: arXiv preprint arXiv:2502.07827 (2025).", + "[78] Shai Shalev-Shwartz et al. \"Online learning and online convex optimization\". In: Foundations and Trends® in Machine Learning 4.2 (2012), pp. 107-194.", + "[79] Julien Siems, Timur Carstensen, Arber Zela, Frank Hutter, Massimiliano Pontil, and Riccardo Grazzi. \"DeltaProduct: Increasing the Expressivity of DeltaNet Through Products of Householders\". In: arXiv preprint arXiv:2502.10297 (2025).", + "[80] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. \"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=Ai8Hw3AXqks.", + "[81] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. \"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=Ai8Hw3AXqks.", + "[82] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. \"Rofomer: Enhanced transformer with rotary position embedding\". In: Neurocomputing 568 (2024), p. 127063.", + "[83] Yu Sun, Xinhao Li, Karan Dalal, Jiarui Xu, Arjun Vikram, Genghan Zhang, Yann Dubois, Xinlei Chen, Xiaolong Wang, Sanmi Koyejo, et al. \"Learning to (learn at test time): Rnns with expressive hidden states\". In: arXiv preprint arXiv:2407.04620 (2024).", + "[84] Yutao Sun, Li Dong, Shaohan Huang, Shuming Ma, Yuqing Xia, Jilong Xue, Jianyong Wang, and Furu Wei. \"Retentive network: A successor to transformer for large language models\". In: arXiv preprint arXiv:2307.08621 (2023).", + "[85] W Scott Terry. Learning and memory: Basic principles, processes, and procedures. 
Routledge, 2017.", + "[86] Robert Tibshirani. \"Regression shrinkage and selection via the lasso\". In: Journal of the Royal Statistical Society Series B: Statistical Methodology 58.1 (1996), pp. 267-288.", + "[87] Matteo Tiezzi, Michele Casoni, Alessandro Betti, Tommaso Guidi, Marco Gori, and Stefano Melacci. \"On the resurgence of recurrent models for long sequences: Survey and research opportunities in the transformer era\". In: arXiv preprint arXiv:2402.08132 (2024).", + "[88] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. \"Llama: Open and efficient foundation language models\". In: arXiv preprint arXiv:2302.13971 (2023)." + ], + "bbox": [ + 112, + 90, + 921, + 907 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 506, + 936, + 524, + 948 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[89] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf.", + "[90] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Ed. by I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett. Vol. 30. Curran Associates, Inc., 2017. 
URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf.", + "[91] Johannes Von Oswald, Maximilian Schlegel, Alexander Meulemans, Seijin Kobayashi, Eyvind Niklasson, Nicolas Zucchet, Nino Scherrer, Nolan Miller, Mark Sandler, Max Vlademyrov, et al. \"Uncovering mesa-optimization algorithms in transformers\". In: arXiv preprint arXiv:2309.05858 (2023).", + "[92] Ke Alexander Wang, Jiaxin Shi, and Emily B Fox. \"Test-time regression: a unifying framework for designing sequence models with associative memory\". In: arXiv preprint arXiv:2501.12352 (2025).", + "[93] Yingheng Wang, Zichen Wang, Gil Sadeh, Luca Zancato, Alessandro Achille, George Karypis, and Huzefa Rangwala. \"Long-context Protein Language Model\". In: bioRxiv (2024), pp. 2024-10.", + "[94] Songlin Yang, Jan Kautz, and Ali Hatamizadeh. “Gated Delta Networks: Improving Mamba2 with Delta Rule”. In: arXiv preprint arXiv:2412.06464 (2024).", + "[95] Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. “Gated Linear Attention Transformers with Hardware-Efficient Training”. In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=ia5XvxFUJT.", + "[96] Songlin Yang, Bailin Wang, Yu Zhang, Yikang Shen, and Yoon Kim. \"Parallelizing linear transformers with the delta rule over sequence length\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 115491-115522.", + "[97] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. \"HellaSwag: Can a Machine Really Finish Your Sentence?\" In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Ed. by Anna Korhonen, David Traum, and Lluis Marquez. Florence, Italy: Association for Computational Linguistics, July 2019, pp. 4791-4800. DOI: 10.18653/v1/P19-1472. URL: https://aclanthology.org/P19-1472/.", + "[98] Biao Zhang and Rico Sennrich. \"Root mean square layer normalization\". 
In: Advances in Neural Information Processing Systems 32 (2019).", + "[99] Hao Zhang, Alexander C Berg, Michael Maire, and Jitendra Malik. \"SVM-KNN: Discriminative nearest neighbor classification for visual category recognition\". In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06). Vol. 2. IEEE. 2006, pp. 2126-2136." + ], + "bbox": [ + 112, + 90, + 921, + 575 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 508, + 936, + 524, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A Additional Related Work", + "text_level": 1, + "bbox": [ + 112, + 89, + 426, + 108 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Modern Linear RNNs. Recent efforts aim to overcome Transformers quadratic cost and limitations in long-context modeling by designing efficient recurrent alternatives (Tiezzi et al. 2024), mainly due to fast inference and training of such models. The first generation of models—such as RetNet (Sun et al. 2023), LRU (Orvieto et al. 2023), RWKV (Peng et al. 2023), S5 (Smith et al. 2023), and S4 (Gu et al. 2022)—uses data-independent transition matrix mechanism with Hebbian-like update rule. The second generation of such models started to incorporate input-dependent parameters into such linear architectures (e.g., Griffin (De et al. 2024), SSMs (Behrouz et al. 2024b; Dao et al. 2024; Hasani et al. 2023), RWKV6 (Peng et al. 2024)), and/or use more expressive memory updating rule based on delta rule (Liu et al. 2024a; Peng et al. 2025b; Schlag et al. 2021; Yang et al. 2024a,c). The next generation of models, extend the memory architecture to deep models, while using delta-rule-like update rule (Sun et al. 2024), or momentum-based update rule (Behrouz et al. 2024c). Recently, to further enhance the performance of delta-rule-based sequence models, Siemens et al. 
(2025) suggest using multiple gradient descent update per token, resulting in more expressive sequence models in state tracking tasks.", + "bbox": [ + 109, + 126, + 921, + 294 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In addition to the above fast linear recurrent sequence models, several studies have focused on (interpretable) non-linear RNNs (Csordás et al. 2024; Gonzalez et al. 2024; Karami et al. 2025; Lim et al. 2024; Merrill et al. 2024; Schone et al. 2025; Von Oswald et al. 2023), and how their training can be faster (Gonzalez et al. 2024; Lim et al. 2024; Schone et al. 2025). However, due to the recurrent nature of such models, parallelizing them in larger scales is still challenging.", + "bbox": [ + 109, + 300, + 921, + 363 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Fast Weight Programs. The idea of interpretation of linear layers as the key-value associative memory system backs to Hopfield networks (Hopfield 1982) and then fast weight programs, in which dynamic fast programs are incorporated into recurrent neural networks as writeable memory (Schlag et al. 2021; Schmidhuber 1992; Schmidhuber 1993). The two learning rules of Hebbian (Hebb 2005) and delta rule (Prados et al. 1989) are the most popular learning rules for them, which have been extensively explored in the literature (Irie et al. 2021; Munkhdalai et al. 2019, 2017; Schlag et al. 2021; Schmidhuber 1992; Yang et al. 2024a,c).", + "bbox": [ + 109, + 373, + 921, + 467 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Test Time Training. The key ideas of learning at test time backs to early studies on local learning Bottou et al. 1992, in which each test data is trained on its neighbors before making a prediction (Gandelsman et al. 2022; Zhang et al. 2006). Later applying this idea on modern architectures, it has shown promising performance in diverse downstream tasks such as vision tasks (Jain et al. 2011; Mullapudi et al. 
2019), video generation (Dalal et al. 2025), etc., mostly due to their ability to mitigate out-of-distribution samples.", + "bbox": [ + 109, + 478, + 921, + 556 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Hopfield Networks. We build MIRAS based on the concept of associative memory in its broad form, where we learn an underlying mapping between keys and values. One of the earliest studies that discuss building neural architectures based on associative memory is Hopfield Networks (Hopfield 1982), in which associative memory is defined as the minimizing the energy function required to store keys and values. While traditional Hopfield networks has limited applicability in recent years (mainly due to limited capacity of vector-valued memory and energy function), several recent studies aim to improve their capacity by various techniques (Krotov 2021; Krotov et al. 2016; Li et al. 2024b), including extending the energy function of such models based on exponential kernels (Krotov et al. 2016; Lucibello et al. 2024), and discuss their connection to Transformers (Hu et al. 2024; Ramsauer et al. 2021).", + "bbox": [ + 109, + 566, + 921, + 690 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Unifying Frameworks. In recent years, there have been growing efforts to understand the underlying mechanism of sequence models and unify (a subset of) them through a single perspective. Dao et al. (2024) present SSD framework to connect linear Transformers and (a subset of) linear recurrent models through the lens of associative operators and structured matrices. The SSD framework, however, is limited to models with vector or matrix-valued memory that are updated using a Hebbian-like update rules. Later, Liu et al. (2024a) present an online learning perspective on (a subset of) linear recurrent models. 
While this framework can also explain more expressive recurrent models based on delta rule, it is limited to online learners (i.e., models that optimize their internal associative memory using stochastic optimizers, such as stochastic gradient descent) with matrix-valued memory. Several modern sequence models, such as Transformers (Vaswani et al. 2017b) or Titans (Behrouz et al. 2024c) cannot be expressed in this framework. Sun et al. (2024) further provide a unifying perspective on how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models, mainly due to limiting the objective to be regression loss. Recently, in a concurrent work to ours, Wang et al. (2025) also force models to have the same attentional bias objective and show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss.", + "bbox": [ + 109, + 700, + 923, + 916 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 506, + 936, + 526, + 949 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "However, this simplification, fully changes the understanding of underlying update rules in these models. For example, contrary to Wang et al. (2025), MIRAS can distinguish models with Hebbian-like update (with dot product similarity) and delta rule update (with regression loss). Furthermore, all presented sequence models in this work (e.g., MONETA, MEMORA, YAAD) as well as models like HGRN2 (Qin et al. 
2024) are placed outside of this class of models, due to their different attentional bias.", + "bbox": [ + 111, + 92, + 921, + 167 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "B Proof of Proposition 3.2", + "text_level": 1, + "bbox": [ + 112, + 190, + 410, + 210 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Here we present the proof of Proposition 3.2. For the sake of completeness, let us first re-state this Proposition.", + "bbox": [ + 111, + 220, + 854, + 236 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Proposition 3.2. Let $\\eta_t = \\eta$ and define $h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)$ . Assume $\\mathcal{W} = \\mathbb{R}^d$ and the function $h_t(W)$ is strictly convex in $W$ and let $\\mathcal{D}_h(\\cdot, \\cdot)$ be the Bregman divergence defined by function $h(\\cdot)$ , i.e., $\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle$ . Set $\\mathrm{Ret}_t(W, W') = \\mathcal{D}_h(W, W')$ and $\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)$ in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint).", + "bbox": [ + 111, + 242, + 919, + 306 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Proof. Let $\\{\\widehat{W}_1, \\widehat{W}_2, \\ldots\\}$ be the sequence of parameters obtained by (FTRL Viewpoint) and $\\{\\widetilde{W}_1, \\widetilde{W}_2, \\ldots\\}$ be the sequence of parameters obtained by (Learning-Retaining Viewpoint). To show both update rules are equivalent, it suffices to show that the above two sequences are the same if they are initialized at the same point. We prove this statement by induction. First of all, since both sequences are initialized at the same point, the induction base is satisfied (i.e. $\\widetilde{W}_1 = \\widehat{W}_1$ ). 
Now, assume by induction hypothesis that", + "bbox": [ + 111, + 319, + 919, + 395 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {W} _ {t - 1} = \\widehat {W} _ {t - 1}. \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 470, + 393, + 919, + 411 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "To complete the induction, we need to show $\\widetilde{W}_t = \\widehat{W}_t$ . To this end, notice that, by (Learning-Retaining Viewpoint), we have", + "bbox": [ + 111, + 415, + 916, + 444 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widetilde {\\ell} _ {t} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\operatorname {R e t} _ {t} (W, \\widetilde {W} _ {t - 1})\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 444, + 666, + 467 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Using the choice of the Attentional Bias and the Retention function in the Proposition, we obtain", + "bbox": [ + 112, + 470, + 763, + 486 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) - \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) \\tag {34} \\\\ - \\frac {1}{\\eta} R (\\widetilde {W} _ {t - 1}) - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widetilde {W} _ {t - 1}), W - \\widetilde {W} _ {t - 1} \\right\\rangle . 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 494, + 919, + 578 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Ignoring the constant terms and using the induction hypothesis (33), we get", + "bbox": [ + 112, + 584, + 622, + 599 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) \\tag {35} \\\\ - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widehat {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widehat {W} _ {t - 1}), W - \\widehat {W} _ {t - 1} \\right\\rangle . \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 607, + 919, + 691 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "On the other hand, recall that $\\{\\widehat{W}_1,\\widehat{W}_2,\\ldots \\}$ is obtained by (FTRL Viewpoint). Therefore, we have", + "bbox": [ + 111, + 698, + 756, + 715 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {W} _ {t - 1} = \\arg \\min _ {W} \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\mathcal {R} _ {t} (W).\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 723, + 668, + 762 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Thus, we have", + "bbox": [ + 112, + 768, + 214, + 782 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} \\left(W _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) + \\frac {1}{\\eta} \\nabla R \\left(W _ {t - 1}\\right) = 0. 
\\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 782, + 919, + 821 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Combining (36) and (35), we obtain", + "bbox": [ + 112, + 825, + 351, + 840 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W).\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 849, + 656, + 887 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "This implies $\\widetilde{W}_t = \\widehat{W}_t$ , which completes the proof.", + "bbox": [ + 112, + 896, + 446, + 912 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 903, + 900, + 916, + 910 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 506, + 936, + 524, + 948 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 89, + 369, + 111 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We perform experimental evaluation on the language modeling (Merit et al. 2017; Paperno et al. 2016), common-sense reasoning (Bisk et al. 2020; Clark et al. 2019; Clark et al. 2018; Sakaguchi et al. 2021; Zellers et al. 2019), and long context needle-in-haystack tasks (Hsieh et al. 2024). We compare our models with the state-of-the-art linear recurrent models, Transformers, and hybrid models (recurrent + attention). More specifically we compare with Transformer++ (Touvron et al. 2023), RetNet (Sun et al. 2023), Gated Linear Attention (GLA) (Yang et al. 2024b), Mamba (Gu et al. 2024), Mamba2 (Dao et al. 2024), DeltaNet (Yang et al. 2024c), TTT (Sun et al. 2024), and Gated DeltaNet (Yang et al. 
2024a).", + "bbox": [ + 109, + 119, + 921, + 212 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/45c18238da20b86ece6c49f73c0da7ab6603bbe33993974acf302e629ba56a20.jpg", + "table_caption": [ + "Table 5: Architectural Details." + ], + "table_footnote": [], + "table_body": "
ModelBlockDimHeadPeak LRToken
170M12768163e-315B
340M241024161.5e-315B
780M241536161.25e-330B
", + "bbox": [ + 318, + 250, + 715, + 337 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 508, + 936, + 524, + 948 + ], + "page_idx": 25 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_model.json b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fa2a817e61e41e162376d1ece203b35d186a4a6f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_model.json @@ -0,0 +1,4806 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.13173v1 [cs.LG] 17 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.143, + 0.138, + 0.892, + 0.19 + ], + "angle": 0, + "content": "It’s All Connected: A Journey Through Test-Time Memorization, Attentional Bias, Retention, and Online Optimization" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.215, + 0.804, + 0.235 + ], + "angle": 0, + "content": "Ali Behrouz†, Meisam Razaviyayn†, Peilin Zhong†, and Vahab Mirrokni†" + }, + { + "type": "text", + "bbox": [ + 0.446, + 0.254, + 0.589, + 0.274 + ], + "angle": 0, + "content": "Google Research" + }, + { + "type": "text", + "bbox": [ + 0.319, + 0.277, + 0.72, + 0.291 + ], + "angle": 0, + "content": "{alibehrouz, Razaviyayn, peilinz, mirrokni}@google.com" + }, + { + "type": "title", + "bbox": [ + 0.486, + 0.323, + 0.547, + 0.336 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.343, + 0.88, + 0.565 + ], + "angle": 0, + "content": "Designing efficient and effective architectural backbones has been in the core of research efforts to enhance the capability of foundation models. 
Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we reconceptualize neural architectures, including Transformers, Titans, and modern linear recurrent neural networks as associative memory modules that learn a mapping of keys and values using an internal objective, referred to as attentional bias. Surprisingly, we observed that most existing sequence models leverage either (1) dot-product similarity, or (2) \\(\\ell_2\\) regression objectives as their attentional bias. Going beyond these objectives, we present a set of alternative attentional bias configurations along with their effective approximations to stabilize their training procedure. We then reinterpret forgetting mechanisms in modern deep learning architectures as a form of retention regularization, providing a novel set of forget gates for sequence models. Building upon these insights, we present MIRAS, a general framework to design deep learning architectures based on four choices of: (i) associative memory architecture, (ii) attentional bias objective, (iii) retention gate, and (iv) memory learning algorithm. We present three novel sequence models—MONETA, YAAD, and MEMORA—that go beyond the power of existing linear RNNs while maintaining a fast parallelizable training process. Our experiments show different design choices in MIRAS yield models with varying strengths. For example, certain instances of MIRAS achieve exceptional performance in special tasks such as language modeling, commonsense reasoning, and recall intensive tasks, even outperforming Transformers and other modern linear recurrent models." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.588, + 0.291, + 0.606 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.618, + 0.921, + 0.71 + ], + "angle": 0, + "content": "Designing efficient architectural backbones for sequence modeling is a key to enhance the capability of foundation models in domains ranging from language (Behrouz et al. 2024c; Vaswani et al. 2017a) and computer vision (Dosovitskiy et al. 2020) to computational biology (Wang et al. 2024) and neuroscience (Behrouz et al. 2024a). While Transformers (Vaswani et al. 2017a), mainly due to their in-context learning and ability to learn at scale (Kaplan et al. 2020), have been firmly established as state-of-the-art (SOTA) models in sequence modeling, their quadratic time and space complexity limits their applicability in tasks that require long context modeling (Dalal et al. 2025; Li et al. 2024a; Liu et al. 2024b)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.717, + 0.922, + 0.853 + ], + "angle": 0, + "content": "Recent efforts aim to overcome Transformer limitations in long-context modeling by designing efficient recurrent alternatives (Behrouz et al. 2024c; Neil et al. 2017; Smith et al. 2022). Unlike Transformer's linearly growing memory (i.e., the KV cache), these models compress the context into a fixed size memory, demanding improved memory management for comparable performance. To design more effective architectures, studies focus on improving memory capacity and its management by using/designing more expressive: (1) Learning rules: from Hebbian rule (Hebb 2005) to Delta rule (Neil et al. 2017); (2) Forget gates: from LSTM's (Schmidhuber et al. 1997) to Mamba2's (Dao et al. 2024) and then Titan's forget gates (Behrouz et al. 2024c); and (3) More expressive memory architectures: from vector-valued memory in RetNet (Sun et al. 2023) and LRU (Orvieto et al. 2023) to neural deep memory in Titans (Behrouz et al. 
2024c) and TTT (Sun et al. 2024)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.86, + 0.92, + 0.906 + ], + "angle": 0, + "content": "At the core of these advancements lies a critical question: \"what is the underlying design framework behind these sequence models, and how can these models be enhanced?\" Taking inspiration from the broad definitions of associative memory and learning in neuropsychology literature (Okano et al. 2000), several studies discuss the connection between Transformers" + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.093, + 0.921, + 0.157 + ], + "angle": 0, + "content": "and (linear) Recurrent Neural Networks (RNNs) with associative memory (Bietti et al. 2023; Hopfield 1982; Ramsauer et al. 2021). These studies, however, either: (1) lack a universal explanation to fully illustrate the underlying learning algorithms, (2) are limited to a specific definition of associative memory and lack generalizability, and/or (3) are unable to describe standard, widely used components such as forget gate." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.167, + 0.923, + 0.306 + ], + "angle": 0, + "content": "Contributions. Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we re-conceptualize neural architectures, including Transformers, Titans, and other modern linear recurrent neural networks based on a broad definition of associative memory with attentional bias. We define and formalize the concept of attentional bias as the internal memory objective of sequence models (see Section 3) that aims to learn the underlying mapping between inputs (i.e., keys and values). Our formulation reveals that almost all existing sequence models are associative memories that leverage the same type of attentional bias. 
We reinterpret existing forgetting mechanisms in modern deep learning architectures as a form of retention \\(\\ell_2\\)-regularization for the attentional bias, and then provide a novel set of alternative retention gates (forget gate) for sequence models, providing new insights on how to balance learning new concepts and the retention of previously learned concepts." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.31, + 0.923, + 0.372 + ], + "angle": 0, + "content": "Building upon our formulation of memory and forget gate, we present MIRAs1, a fundamental framework to design novel sequence modeling architectures by four choice of: (1) Attentional bias (i.e., memory objective), (2) Retention gate, (3) Memory architecture, and (4) Memory learning algorithm (i.e., optimizer). We motivate and discuss several novel design choices, leading to novel architectures beyond existing sequence modeling architectures." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.378, + 0.923, + 0.456 + ], + "angle": 0, + "content": "Finally, we focus on three novel variants of MIRAS-MONETA, YAAD, and MEMORA—that are based on attentional biases beyond simple \\(\\ell_2\\)-regression objective as well as novel retention gating mechanisms that are more robust than existing ones. We further perform experimental evaluations of these three variants on language modeling, common-sense reasoning, needle-in-haystack, and recall intensive tasks. The results illustrate the superior performance of these variants, outperforming state-of-the-art sequence models." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.467, + 0.923, + 0.591 + ], + "angle": 0, + "content": "Roadmap. In Section 2, we review literature and discuss relevant concepts that we use through the paper. In Section 3, we present and discuss the broad definition of associative memory with formally defining the concept of attentional bias. 
We then discuss two viewpoints—Learning-Retaining and Follow-the-Regularized-Leader (FTRL)—to interpret sequence modeling through the lens of optimization and prove the generality of Learning-Retaining over FTRL. In Section 4, we present our MIRAS framework and discuss how it unifies modern sequence models. In Section 5, to show the potential of MIRAS framework, we discuss a variety of novel design choices for (1) attentional bias, and (2) retention gate (forget gate). Later in Section 5.3, we present three novel sequence models as the variants of MIRAS, and then discuss how to train them in a parallelizable manner. Finally, our experimental evaluations are reported in Section 6." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.61, + 0.482, + 0.631 + ], + "angle": 0, + "content": "2 Preliminaries and Background" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.642, + 0.796, + 0.658 + ], + "angle": 0, + "content": "In this section, we review the related studies and background concepts that we use through the paper." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.67, + 0.921, + 0.716 + ], + "angle": 0, + "content": "Attention. Attention as the backbone of Transformers is a critical component that acts as their associative memory (Bietti et al. 2023). 
Given input \\( x \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}} \\), causal attention computes output \\( y \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}} \\) based on Softmax over input dependent key, value, and query matrices:" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.725, + 0.92, + 0.74 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = x \\mathbf {W} _ {\\mathrm {Q}}, \\quad \\mathbf {K} = x \\mathbf {W} _ {\\mathrm {K}}, \\quad \\mathbf {V} = x \\mathbf {W} _ {\\mathrm {V}}, \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.744, + 0.92, + 0.795 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {i} = \\sum_ {j = 1} ^ {i} \\frac {\\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {j} / \\sqrt {d _ {\\mathrm {i n}}}\\right) \\mathbf {v} _ {j}}{\\sum_ {\\ell = 1} ^ {i} \\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {\\ell} / \\sqrt {d _ {\\mathrm {i n}}}\\right)}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.805, + 0.921, + 0.882 + ], + "angle": 0, + "content": "where \\(\\mathbf{W}_{\\mathrm{Q}}, \\mathbf{W}_{\\mathrm{K}}\\), and \\(\\mathbf{W}_{\\mathrm{V}} \\in \\mathbb{R}^{d_{\\mathrm{in}} \\times d_{\\mathrm{in}}}\\) are learnable parameters. While Transformers achieve significant improvements compared to traditional Recurrent Neural Networks (RNNs)—such as LSTM (Schmidhuber et al. 1997), their complexity that requires at least \\(N \\times d\\) operators to calculate the output has been the main motivation for researchers to think about alternative architectures. We divide and review the research efforts to design alternative architectures into two groups: (1) Linear shallow memory recurrent models, (2) Deep memory modules." + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.888, + 0.92, + 0.914 + ], + "angle": 0, + "content": "1 \"Miras\" is the translation of \"Legacy\" in several languages: such as Persian, Arabic, and Turkish. 
We choose this name since this framework provides clear steps for future studies to design powerful sequence models based on their task at hand." + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.523, + 0.949 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.422, + 0.097, + 0.616, + 0.114 + ], + "angle": 0, + "content": "Associative Memory" + }, + { + "type": "title", + "bbox": [ + 0.149, + 0.125, + 0.294, + 0.137 + ], + "angle": 0, + "content": "Memory Architecture" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.146, + 0.294, + 0.166 + ], + "angle": 0, + "content": "The neural architecture that stores memories." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.176, + 0.182, + 0.187 + ], + "angle": 0, + "content": "1. Vector" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.197, + 0.182, + 0.207 + ], + "angle": 0, + "content": "2.Matrix" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.217, + 0.291, + 0.229 + ], + "angle": 0, + "content": "3. Multilayer Perceptron (MLP)" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.238, + 0.236, + 0.26 + ], + "angle": 0, + "content": "4. Memory Mosaics" + }, + { + "type": "list", + "bbox": [ + 0.128, + 0.176, + 0.291, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.363, + 0.125, + 0.472, + 0.137 + ], + "angle": 0, + "content": "Attentional Bias" + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.146, + 0.498, + 0.157 + ], + "angle": 0, + "content": "The memory internal objective." + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.175, + 0.442, + 0.188 + ], + "angle": 0, + "content": "1. \\(\\ell_p\\) Regression Loss" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.197, + 0.457, + 0.209 + ], + "angle": 0, + "content": "2. Dot Product Similarity" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.217, + 0.403, + 0.228 + ], + "angle": 0, + "content": "3. 
Huber Loss" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.238, + 0.419, + 0.26 + ], + "angle": 0, + "content": "4. KL-Divergence" + }, + { + "type": "list", + "bbox": [ + 0.321, + 0.175, + 0.457, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.562, + 0.125, + 0.663, + 0.137 + ], + "angle": 0, + "content": "Retention Gate" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.146, + 0.701, + 0.167 + ], + "angle": 0, + "content": "The gate to retain the past state of the memory." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.176, + 0.709, + 0.188 + ], + "angle": 0, + "content": "1. \\(\\ell_p\\) Regularization (Local or Global)" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.197, + 0.671, + 0.209 + ], + "angle": 0, + "content": "2. Elastic Net Regularization" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.217, + 0.613, + 0.229 + ], + "angle": 0, + "content": "3. KL Divergence" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.238, + 0.642, + 0.25 + ], + "angle": 0, + "content": "4. Bregman Divergence" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.176, + 0.709, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.748, + 0.125, + 0.876, + 0.138 + ], + "angle": 0, + "content": "Memory Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.737, + 0.147, + 0.886, + 0.17 + ], + "angle": 0, + "content": "The algorithm that learns the mapping." + }, + { + "type": "text", + "bbox": [ + 0.715, + 0.175, + 0.848, + 0.187 + ], + "angle": 0, + "content": "1. Gradient Descent (GD)" + }, + { + "type": "text", + "bbox": [ + 0.715, + 0.196, + 0.836, + 0.208 + ], + "angle": 0, + "content": "2. GD with Momentum" + }, + { + "type": "text", + "bbox": [ + 0.715, + 0.216, + 0.823, + 0.228 + ], + "angle": 0, + "content": "3. Newton's Method" + }, + { + "type": "text", + "bbox": [ + 0.715, + 0.237, + 0.866, + 0.259 + ], + "angle": 0, + "content": "4. Non-parametric Solutions ..." 
+ }, + { + "type": "list", + "bbox": [ + 0.715, + 0.175, + 0.866, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.274, + 0.8, + 0.286 + ], + "angle": 0, + "content": "Associative Memory is a neural network that learns to map keys to values based on an Attentional Bias objective." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.288, + 0.918, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.413, + 0.922, + 0.489 + ], + "angle": 0, + "content": "Figure 1: The overview of MIRAS framework. MIRAS is based on four critical choices of (1) memory architecture, (2) attentional bias, (3) retention gate, and (4) memory learning algorithm. In this framework, the memory architecture determines the model capacity to memorize; attentional bias is responsible for modeling the underlying mapping patterns; retention gate determines how to balance learning new concepts and the retention of previously learned concepts; and memory learning algorithm is responsible for memory management." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.515, + 0.92, + 0.665 + ], + "angle": 0, + "content": "(Linear) Recurrent Models. For many years, non-linear (gated) recurrent neural networks had been the de facto architectural backbones in deep learning (Greff et al. 2016). Their recurrent nature, however, results in non-parallelizable training, making their large scale training infeasible. To this end, in recent years, linear RNNs as alternatives to both Transformers and non-linear RNNs attract much attention mainly due to their parallelizable and linear-time training while maintaining competitive performance (Peng et al. 2025a; Sun et al. 2023; Yang et al. 2024c). Earlier variants of linear RNNs (De et al. 2024; Sun et al. 2023; Yang et al. 
2024b), which mostly are based on Hebbian learning rule (Hebb 2005), aim to compress the data into their vector-valued (or matrix-valued) memory (De et al. 2024; Katharopoulos et al. 2020; Liu et al. 2024a; Sun et al. 2023; Yang et al. 2024b). Let \\(\\mathcal{M}_t \\in \\mathbb{R}^{d \\times n}\\) be the memory (\\(n = 1\\) means vector-valued memory), and \\(\\mathbf{k}, \\mathbf{v} \\in \\mathbb{R}^d\\) are keys and values (i.e., projection of input \\(x_t \\in \\mathbb{R}^d\\)), a simple general formulation for such linear RNNs can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.432, + 0.678, + 0.92, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} _ {t} = A _ {t} * \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.707, + 0.92, + 0.797 + ], + "angle": 0, + "content": "where \\(*\\) is an arbitrary associative operator and \\(A_{t}\\) is a data-(in)dependent diagonal matrix or a scalar (Yang et al. 2024c). Despite the efficiency that comes with the linear recurrent nature of these models, the memory can overflow mainly due to the additive (without replacement) nature of Hebbian learning rule, resulting in limited memory capacity and limited expressive power in in-context learning tasks. Moreover, the vector-valued memory of these architectures can limit their ability to learn/memorize large context window, mainly due to the limited expressive power of memory to learn the underlying patterns of data (Behrouz et al. 2024c; Sun et al. 2024)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.805, + 0.922, + 0.911 + ], + "angle": 0, + "content": "To address the above mentioned limitations, recurrent models that use a matrix-valued memory with Delta learning rule has gained popularity in recent years (Neil et al. 2017; Schlag et al. 2021; Yang et al. 2024c). 
Despite significant advantages, even these delta-rule-based recurrent models face theoretical limitations (Irie et al. 2023) with moderate performance in practice (Yang et al. 2024c). Recently, several studies aim to improve the performance of such models by adding scalar or channel-wise forget gate mechanisms (Peng et al. 2025b; Yang et al. 2024a), using negative eigenvalues (Grazzi et al. 2024), and multiple learning steps (Siems et al. 2025). They, however, still suffer from performance drop in long context, mainly due to the less expressive memory architectures (Behrouz et al. 2024c)." + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.091, + 0.922, + 0.168 + ], + "angle": 0, + "content": "Table 1: Overview of recent sequence models in MIRAS framework perspective. Surprisingly, all models are using the same type of attentional bias and regularization (forget gate). Note that these architectural choices does not uniquely identify the backbone as there are other design choices (e.g., input-dependency, channel-wise parameters, etc.) as well as the use of other components such as attention, convolutions, etc. Note that for attentional bias and retention gate, we are referring to the original design of MIRAS, discussed in Equation 4 and Remark 1." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.178, + 0.917, + 0.502 + ], + "angle": 0, + "content": "
ModelMemory ArchitectureAttentional BiasRetention Gate†Memory AlgorithmMemory Write Operation
Shallow Memory
RetNet (2023)VectorDot-ProductL2GDMt=αMt-1+vtktT
Transformer (2017)MatrixL2-NonparametricMt=Mt-1∪{kt, vt}
LA (2021)MatrixDot-Product-GDMt=Mt-1+vtktT
DFWMatrixDot-ProductL2GDMt=(βtαT) ⊙ Mt-1+vtktT
Lightening Attention (2025)MatrixDot-ProductL2GDMt=αMt-1+vtktT
GLA (2024)MatrixDot-ProductL2GDMt=Diag(αt)Mt-1+vtktT
Mamba (2024)MatrixDot-ProductL2GDMt=αMt-1+vtktT
HGRN2 (2024)MatrixL1L2GDMt=Diag(αt)Mt-1+vt(1-αt)T
DeltaNet (2017)MatrixL2-GDMt=(I-βtktkT)Mt-1+βtvtktT
Longhorn (2024)MatrixL2-Implicit GDMt=(I-βtktkT)Mt-1+(βt1+ktkβt)xtkT
TTT-Linear (2024)MatrixL2-GDMt=Mt-1-η∇L(Mt-1, xt)
Gated DeltaNet (2024)MatrixL2L2GDMt=(αt(I-βtktkT))Mt-1+βtvtktT
RWKV-7 (2025)MatrixL2L2GDMt=diag(αt)(I-βtktkT)Mt-1+βtvtktT
DeltaProduct (2025)MatrixL2L2MGD*Mt=(αtΠi=1n(I-βt,ikt,i)T)Mt-1+Σj=1nΠi=j(I-βt,ivtj,kj,i)
Deep Memory
TTT-MLP (2024)2-layer MLPL2-GDMt=Mt-1-η∇L(Mt-1;kt, vt)
Titans-LMM (2024)k-layer MLPL2L2GD + MomentumMt=αMt-1-St, where St=ηSt-1-θt∇L(Mt-1;kt, vt)
MONETA (ours)2-layer MLPLpLqGDAt=AtA1-ηt∇lp(Wt-1;kt, vt), Wt=At/||At||q-2
YAAD (ours)2-layer MLPHuberL2GDWt=atWt-1-(ηt∇ε2(Wt-1;kt, vt) if ||M(kt)-vt|≤δt, ηtδt∇ε1(Wt-1;kt, vt) Otherwise.
MEMORA (ours)2-layer MLPL2KLGDWt=Softmax(αt log(Wt-1)-ηt∇ε2(Wt-1;kt, vt))
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.123, + 0.503, + 0.335, + 0.513 + ], + "angle": 0, + "content": "* is using multiple rounds of GD per token." + }, + { + "type": "table_footnote", + "bbox": [ + 0.123, + 0.513, + 0.891, + 0.525 + ], + "angle": 0, + "content": "For the sake of clarity, we use L2 for all modified L2-like regularizations. However, in fact, only Titans and RWKV-7 are using L2 retention gate (see Section 4)" + }, + { + "type": "list", + "bbox": [ + 0.123, + 0.503, + 0.891, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.552, + 0.92, + 0.72 + ], + "angle": 0, + "content": "Deep Memory Module: Titans and Test Time Training. To overcome the limited memory and to extend the effective context length of deep sequence models, more recent studies focus on a new generation of architectures with deep memory module (Behrouz et al. 2024c; Sun et al. 2024). These architectures are built on the meta-learning perspective, where the memory is an MLP architecture that is updated using gradient descent (with momentum) (Behrouz et al. 2024c; Sun et al. 2024). Sun et al. (2024) further provide a unifying perspective that how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models. Recently, in a concurrent work to ours, Wang et al. (2025) show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss. It, however, still remains unanswered that \"What is the underlying design framework behind these sequence models that can accurately unify existing architectures?\" Moreover, the role of forget gates and its alternative choices in modern sequence models is surprisingly less explored." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.741, + 0.719, + 0.762 + ], + "angle": 0, + "content": "3 Associative Memory, Attentional Bias, and Retention" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.772, + 0.92, + 0.865 + ], + "angle": 0, + "content": "Associative memory, which is an inseparable component of learning in humans (Terry 2017), has been the inspiration for many artificial neural architectures in the literature (Behrouz et al. 2024c; Hopfield 1982; Neil et al. 2017). These studies, however, define instances of the concept of associative memory, limiting the architecture to a specific class of similarity metrics between entities (i.e., keys and values). That is, broadly speaking, associative memory is an operator that maps a set of keys \\( K \\) to a set of values \\( V \\), and so to learn the underlying mapping patterns in data, it requires an objective that targets a type of memory and measures the quality of learned mappings:" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.87, + 0.92, + 0.903 + ], + "angle": 0, + "content": "Definition 3.1 (Associative Memory and Attentional Bias). Given a set of keys \\(\\mathcal{K} \\subseteq \\mathbb{R}^{d_k}\\) and values \\(\\mathcal{V} \\subseteq \\mathbb{R}^{d_o}\\), associative memory is an operator \\(\\mathcal{M}: \\mathcal{K} \\to \\mathcal{V}\\). 
Learning the mapping of associative memory is based on an objective \\(\\mathcal{L}\\), called" + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.523, + 0.949 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.093, + 0.759, + 0.109 + ], + "angle": 0, + "content": "Attentional Bias, that determines the type of memory and its tendency to prioritize some events:" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.12, + 0.92, + 0.142 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} ^ {*} = \\arg \\min _ {\\mathcal {M}} \\quad \\mathcal {L} (\\mathcal {M} (\\mathcal {K}); \\mathcal {V}). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.161, + 0.304, + 0.175 + ], + "angle": 0, + "content": "A few remarks are in order:" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.184, + 0.921, + 0.23 + ], + "angle": 0, + "content": "Remark 1. When we parameterize the memory with parameter \\( W \\), we use \\( \\mathcal{M}(W, \\mathbf{k}) \\). In this parametric setting, the optimization problem in (4) should be performed over the parameter \\( W \\). Furthermore, in the parametric setup, we might use an additional regularization \\( \\mathcal{R}(W) \\) to control the retaining of the past data." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.237, + 0.922, + 0.297 + ], + "angle": 0, + "content": "Remark 2. Learning the mapping between keys and values (Equation 4) is a meta-learning problem, in which the attentional bias is optimized in the inner-loop and all other parameters of the neural network (e.g., linear projections, convolutions, etc.) are optimized in the outer-loop. Therefore, the model learns how to store the data into its parameters at test time (Behrouz et al. 2024c; Sun et al. 2024)." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.316, + 0.792, + 0.334 + ], + "angle": 0, + "content": "3.1 Learning to Memorize and to Retain Through the Lens of Optimization" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.34, + 0.92, + 0.386 + ], + "angle": 0, + "content": "Definition 3.1 translates the design of a neural architecture based on the concept of associative memory to learning the underlying mapping between keys and values, by minimizing an objective \\(\\mathcal{L}\\). To optimize Equation 4, one simple approach is to utilize the idea of gradient descent. Specifically, given a new pair of keys and values, we update the memory as:" + }, + { + "type": "equation", + "bbox": [ + 0.409, + 0.398, + 0.92, + 0.415 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.426, + 0.92, + 0.502 + ], + "angle": 0, + "content": "where, for simplicity, we use the definition \\(\\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\coloneqq \\mathcal{L}(\\mathcal{M}(W; \\mathbf{k}_t), \\mathbf{v}_t)\\). Behrouz et al. (2024c) re-interpret the formulation as a momentary surprise metric, where the model memorizes tokens that violates the expectation of the objective (i.e., being surprising to the memory). 
Although the choice of objective is an important step to fully interpret Equation 5 (which we discuss in detail in Section 5), there are different viewpoints to interpret this update rule in its general format, which later can help us to go beyond existing architectures:" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.52, + 0.767, + 0.538 + ], + "angle": 0, + "content": "3.2 Viewpoint 1: Online Regression and Follow-The-Regularized-Leader" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.545, + 0.816, + 0.56 + ], + "angle": 0, + "content": "Equation (5) can be viewed as one step of online gradient descent over the sequence of the loss functions" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.572, + 0.92, + 0.589 + ], + "angle": 0, + "content": "\\[\n\\ell \\left(W; \\mathbf {k} _ {1}, \\mathbf {v} _ {1}\\right), \\ell \\left(W; \\mathbf {k} _ {2}, \\mathbf {v} _ {2}\\right), \\dots , \\ell \\left(W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\dots . \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.6, + 0.92, + 0.646 + ], + "angle": 0, + "content": "It is well known that the online gradient descent can be viewed as a special case of Follow-The-Regularized-Leader (FTRL) algorithm with a special choice of loss functions (Shalev-Shwartz et al. 2012, Chapter 2) and (Hazan et al. 2016). 
Specifically, assuming \\( W_0 = 0 \\), the update rule in (5) is equivalent to" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.658, + 0.92, + 0.697 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\left\\langle W - W _ {i - 1}, \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) \\right\\rangle + \\frac {1}{2 \\eta} \\| W \\| _ {2} ^ {2}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.707, + 0.919, + 0.755 + ], + "angle": 0, + "content": "where the term \\(\\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle\\) is the local linear approximation of the original loss at time \\(i\\) and the second term is a regularization term. While the first part \\(\\sum_{i=1}^{t} \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle\\) measures how well can the memory learn all the past tokens, the second term \\(\\frac{1}{2\\eta} \\|W\\|_2^2\\) penalizes the memory update with respect to the size of memory." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.761, + 0.922, + 0.837 + ], + "angle": 0, + "content": "Equation (7) uses linear approximation of the loss function and quadratic regularization. We can, however, in principle use other approximations of the loss function as well as other regularization functions, as used in the past in online optimization (Hazan et al. 2016; Shalev-Shwartz et al. 2012) or in general optimization (Miral 2015; Razaviyayn et al. 2013). Such changes are the idea behind the development of other optimization algorithms such mirror descent. 
More specifically, we can generalize the update rule in (7) to the form:" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.855, + 0.92, + 0.917 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W ; \\mathbf {k} _ {i} , \\mathbf {v} _ {i})} _ {\\text {Attentional Bias}} + \\underbrace {\\frac {1}{\\eta_ {t}} \\mathcal {R} _ {t} (W)} _ {\\text {Memory Stability}}. \\tag {FTRL Viewpoint}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.921, + 0.172 + ], + "angle": 0, + "content": "In this update rule, the term \\(\\sum_{i=1}^{t} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i)\\) aims at memorizing the tokens at test time, while the term \\(\\mathcal{R}_t(W)\\) regularizes the learning dynamics and takes the size of the memory into account when updating it with new incoming data. Choosing different loss functions \\(\\widehat{\\ell}_i(W; x_i)\\) and the regularization term \\(\\frac{1}{\\eta_t} \\mathcal{R}_t(W)\\) can lead to different algorithms such as (online) gradient descent or mirror descent. In this generalization, \\(\\eta_t\\) can be data-dependent. Moreover, we will allow imposing constraint \\(\\mathcal{W}\\) on the choice \\(W\\)."
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.188, + 0.861, + 0.206 + ], + "angle": 0, + "content": "3.3 Viewpoint 2: Learning the Latest Token While Retaining Previous Information" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.213, + 0.921, + 0.259 + ], + "angle": 0, + "content": "Another way to interpret the update rule (5) is to view it as learning from the latest key-value pair \\((\\mathbf{k}_i, \\mathbf{v}_i)\\) (via using its gradient or surprise metric), while staying close to the previous state \\(W_{t-1}\\) to retain the previously memorized tokens. Formally, (5) is equivalent to" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.27, + 0.731, + 0.3 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\right\\rangle + \\frac {1}{2 \\eta_ {t}} \\left\\| W - W _ {t - 1} \\right\\| _ {2} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.311, + 0.919, + 0.342 + ], + "angle": 0, + "content": "The first term locally approximates \\(\\ell(W; \\mathbf{k}_t, \\mathbf{v}_t)\\) around the previous state \\(W_{t-1}\\), while the last term regularizes deviations from \\(W_{t-1}\\). 
This form can generalize to" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.374, + 0.921, + 0.415 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\widetilde {\\ell_ {t}} (W ; \\mathbf {k} _ {t} , \\mathbf {v} _ {t})} _ {\\text {Attentional Bias}} + \\underbrace {\\operatorname {Ret} _ {t} (W , W _ {t - 1})} _ {\\text {Retention}}, \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\text {(Learning-Retaining Viewpoint)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.425, + 0.92, + 0.472 + ], + "angle": 0, + "content": "where the term \\(\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)\\) is an approximation of \\(\\ell (W;\\mathbf{k}_t,\\mathbf{v}_t)\\) and minimizing it corresponds to Learning from the new concepts \\((\\mathbf{k}_t,\\mathbf{v}_t)\\). The second term \\(\\mathrm{Ret}_t(W,W_{t - 1})\\) regularizes the changes in \\(W\\) to make the learning dynamics stable and to retain previously learned knowledge. This Retention function may have local and global components:" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.482, + 0.68, + 0.537 + ], + "angle": 0, + "content": "\\[\n\\operatorname {Ret} _ {t} \\left(W, W _ {t - 1}\\right) = \\underbrace {\\frac {1}{\\eta_ {t}} \\mathrm {D} _ {t} \\left(W , W _ {t - 1}\\right)} _ {\\text {Local Retention}} + \\underbrace {\\frac {1}{\\alpha_ {t}} \\mathrm {G} _ {t} \\left(W\\right)} _ {\\text {Global Retention}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.546, + 0.92, + 0.637 + ], + "angle": 0, + "content": "Here, the term \\( \\mathrm{D}_t(W, W_{t-1}) \\), which is a premetric that controls the deviations from \\( W_{t-1} \\), aims at retaining previously learned knowledge. 
The coefficient \\( \\eta_t \\) can be viewed as a meta in-context learning rate, where larger values of \\( \\eta_t \\) leads to learning more from new concepts, while allowing higher forgetting of previously learned concepts. The second term is a global retention that controls the change of the memory with respect to its size. The special instances of the above viewpoint (e.g., without global retention, with implicit closed-form solution, and/or with limited memory structure) have been the motivation behind some of the recent studies such as Liu et al. (2024a)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.655, + 0.549, + 0.673 + ], + "angle": 0, + "content": "3.4 Further Discussions on the Two Viewpoints" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.68, + 0.92, + 0.755 + ], + "angle": 0, + "content": "The (FTRL Viewpoint) and (Learning-Retaining Viewpoint) are connected through the lens of online optimization. For example, as discussed above, by choosing linear approximation of the loss and quadratic regularization/retention, they can both cover online gradient descent update in (5) as a special case. One straightforward way to make the connection explicit is by defining the premetric \\(\\mathrm{D}_t(W;W^{\\prime})\\) based on the previous loss functions and the regularization, as described in Proposition 3.2 below:" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.762, + 0.92, + 0.826 + ], + "angle": 0, + "content": "Proposition 3.2. Let \\(\\eta_t = \\eta\\) and define \\(h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)\\). Assume \\(\\mathcal{W} = \\mathbb{R}^d\\) and the function \\(h_t(W)\\) is strictly convex in \\(W\\) and let \\(\\mathcal{D}_h(\\cdot, \\cdot)\\) be the Bregman divergence defined by function \\(h(\\cdot)\\), i.e., \\(\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle\\). 
Set \\(Ret_t(W, W') = \\mathcal{D}_h(W, W')\\) and \\(\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)\\) in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.832, + 0.92, + 0.892 + ], + "angle": 0, + "content": "We provide the proof in Appendix B. The above proposition shows that (Learning-Retaining Viewpoint) can also explain the approaches obtained by (FTRL Viewpoint), under some mild assumptions. Hence, (Learning-Retaining Viewpoint) may be seen as a more general version. This is why we focus on this viewpoint in most of our derivations in the next sections." + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.949 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.093, + 0.923, + 0.17 + ], + "angle": 0, + "content": "Remark 3. Given the above viewpoint, we can see that even by using additional global regularization there is no memory erasing or forgetting process (a common term in modern architectures (Behrouz et al. 2024c; Yang et al. 2024a)) but the model might decide to not retain the past state of the memory. Interestingly, this observation also matches the human memory process, where brain does not erase memories but they might become inaccessible due to retrieval failures (Robertson 2002). Therefore, instead of calling it a forget gate, later on, we use \"Retention Gate\" to refer to this term." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.176, + 0.923, + 0.268 + ], + "angle": 0, + "content": "Remark 4. As we discuss in Section 4 and summarize in Table 1, most existing modern sequence models are optimizing associative memory objective (attentional bias in Equation 4) using gradient descent. 
Therefore, to provide further intuition about the connection of existing sequence models as well as their online learning interpretations, we discuss the above two viewpoints that are limited to gradient descent-based update rules. Our initial definition of attentional bias and associative memory in Equation 4, however, is broader and can be optimized by any optimization algorithm (e.g., even Newton's method, or non-parametric solutions)." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.289, + 0.866, + 0.31 + ], + "angle": 0, + "content": "4 MirAs: Learning to Memorize with Robust and Expressive Memory" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.32, + 0.923, + 0.457 + ], + "angle": 0, + "content": "Building upon our definition of associative memory, attentional bias, and previous viewpoints, we present MIRAs framework that not only accurately unifies existing backbone architectures but it also provides insights on how to design the next generation of sequence models. As discussed earlier in Section 3, learning an associative memory can be interpreted as a meta-learning task, in which the associative memory learns how to compress and store data into its parameters at test time. The architecture of the memory in such tasks is particularly important as in longer contexts, the expressivity of the memory structure can limit its ability to learn the underlying patterns. Therefore, the first choice to design a sequence model is the structure of the memory. Given the structure of the memory, parameterized by a set of parameters \\( W \\), as discussed earlier, we aim to minimize a loss function \\( \\ell(W; \\cdot, \\cdot) \\) with a retention regularizer \\( \\mathrm{Ret}(\\cdot) \\) via a learning algorithm (e.g., gradient descent). Accordingly, MIRAs requires four design choices:" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.463, + 0.92, + 0.51 + ], + "angle": 0, + "content": "1. 
Memory Structure: This choice specifies the architecture of the memory. For example, this architecture can be a vector, a linear function, a Multilayer Perceptron (MLP) layer, or even more complex structures. We may restrict the choice of \\( W \\) to be within a certain region, e.g., \\( W \\) to lie within an \\( L_{2} \\) ball to avoid infinite values or unstable training." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.516, + 0.922, + 0.562 + ], + "angle": 0, + "content": "2. Attentional Bias: A key choice is the attentional bias objective \\(\\mathcal{L}(\\cdot)\\) in Equation 4. We can even consider different approximations of the loss function, (e.g., \\(\\widehat{\\ell} (\\cdot ,\\cdot)\\) in (FTRL Viewpoint) or \\(\\widetilde{\\ell} (\\cdot ,\\cdot)\\) in (Learning-Retaining Viewpoint)). The choice of attentional bias determines how memory memorizes the context, maps the inputs, and prioritizes the events." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.569, + 0.923, + 0.615 + ], + "angle": 0, + "content": "3. Memory Stability and Retention: Another key choice is the retention regularizer \\(\\mathcal{R}(\\cdot)\\) (e.g., \\(\\mathcal{R}_t(\\cdot)\\) in (FTRL Viewpoint) and \\(\\mathrm{Ret}_t(\\cdot)\\) in (Learning-Retaining Viewpoint)). In parametric setups, this choice balances learning with retention of past state. An effective retention gate is key to the good performance in long context tasks." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.622, + 0.92, + 0.667 + ], + "angle": 0, + "content": "4. Memory Algorithm: Finally, this choice specifies the learning algorithm that we use to optimize the memory objective. One may use gradient descent, gradient descent with momentum, or any other algorithm (including finding non-parametric solutions)." 
+ }, + { + "type": "list", + "bbox": [ + 0.118, + 0.463, + 0.923, + 0.667 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.674, + 0.92, + 0.735 + ], + "angle": 0, + "content": "The above choices are major design choices for designing backbone sequence models in neural architectures. There are, however, minor decisions that can distinguish models; i.e., data-dependent or independent parameters, scalar or channel-wise learning rate/retaining gate, etc. Next, we discuss the overview of how existing architectures fit into MIRAS framework." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.748, + 0.92, + 0.839 + ], + "angle": 0, + "content": "RNNs with Hebbian Rule. The first generation of modern recurrent architectures (e.g., Linear attention (Katharopoulos et al. 2020), RetNet (Sun et al. 2023), Mamba (Gu et al. 2024), and GLA (Yang et al. 2024b)) are based on Hebbian-like (e.g., gated Hebbian) learning rule (Hebb 2005). We let attentional bias be the dot product similarity. That is, given a memory \\(\\mathcal{M} \\in \\mathbb{R}^{d \\times n}\\) and \\(\\mathbf{k}, \\mathbf{v} \\in \\mathbb{R}^d\\), we define \\(\\tilde{\\ell}_t \\coloneqq -2\\langle \\mathcal{M}_t \\mathbf{k}_t, \\mathbf{v}_t \\rangle\\) and local retention as \\(\\mathrm{Ret}_t(\\mathcal{M}, \\mathcal{M}_{t-1}) = \\| \\mathcal{M}_t - \\alpha \\mathcal{M}_{t-1} \\|_F^2\\). Using Equation Learning-Retaining Viewpoint and gradient descent as the optimizer (i.e., memory learning algorithm), the memory update rule is:" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.845, + 0.92, + 0.863 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} _ {t} = \\alpha \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.868, + 0.92, + 0.915 + ], + "angle": 0, + "content": "When (1) \\(\\alpha = 1\\), memory update is equivalent to Linear Attention (LA) (Katharopoulos et al. 
2020); (2) \\(\\alpha \\in \\mathbb{R}\\) is a learnable parameter, resulting architecture is either lightening attention (\\(n > 1\\)) (Li et al. 2025) or RetNet (\\(n = 1\\)) (Sun et al. 2023); and (3) \\(\\alpha_{t} \\in \\mathbb{R}\\) are data-dependent learnable parameters, resulting sequence model is Mamba2 (Dao et al. 2024)." + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.093, + 0.921, + 0.184 + ], + "angle": 0, + "content": "RNNs with Delta Rule. To improve the memory management and to enhance the memory capacity of the above group, several studies suggest using delta rule (Neil et al. 2017; Schlag et al. 2021) as the learning algorithm in recurrent neural networks (e.g., DeltaNet (Schlag et al. 2021), Longhorn (Liu et al. 2024a), and RWKV7 (Peng et al. 2025b)). In this part, we recall that where \\(\\mathcal{M} \\in \\mathbb{R}^{d \\times n}\\), delta rule is equivalent to optimizing MSE objective \\(\\| \\mathcal{M}_t \\mathbf{k}_t - \\mathbf{v}_t \\|_2^2\\) with \\(\\mathrm{Ret}_t(\\mathcal{M}, \\mathcal{M}_{t-1}) = \\| \\mathcal{M}_t - \\alpha \\mathcal{M}_{t-1} \\|_F^2\\) as local retention, and stochastic gradient descent as optimizer: (\\(\\eta_t\\) is defined in Equation Learning-Retaining Viewpoint)" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.195, + 0.92, + 0.213 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} _ {t} = \\alpha \\left(\\mathbf {I} - \\eta_ {t} \\mathbf {k} _ {t} \\mathbf {k} _ {t} ^ {\\top}\\right) \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.224, + 0.921, + 0.268 + ], + "angle": 0, + "content": "When (1) \\(\\alpha = 1\\), memory update is equivalent to DeltaNet (Schlag et al. 
2021); and (2) \\(\\alpha_{t} \\in \\mathbb{R}^{m}\\) are data-dependent learnable parameters, resulting sequence model is either Gated DeltaNet (Yang et al. 2024a) (\\(m = 1\\)), or RWKV7 (Peng et al. 2025b) (\\(m = d\\)). Therefore, RNNs with delta rule are special instances of MIRAS." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.282, + 0.922, + 0.389 + ], + "angle": 0, + "content": "Beyond Delta Rule. As discussed earlier, while delta rule with its value replacement strategy is more powerful than Hebbian-like learning rules, it suffers from theoretical limitations (Irie et al. 2023) and achieves moderate performance in practice (Yang et al. 2024c). Therefore, several studies have focused on update rules beyond delta rule. Recently, Titans (Behrouz et al. 2024c) suggests using non-linear MSE objective of \\(\\| \\mathcal{M}_t(\\mathbf{k}_t) - \\mathbf{v}_t\\| _2^2\\) with both local and global retention of \\(\\mathrm{D}_t = \\| W_t - W_{t - 1}\\| _F^2\\) and \\(\\mathrm{G}_t = \\| W_t\\| _2^2\\) and optimize it with gradient descent with momentum \\(^2\\). Therefore, Titans-LMM is a special instance of MIRAs, where we use the abovementioned attentional bias and retention regularizations, and gradient descent with momentum as the optimizer." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.396, + 0.92, + 0.456 + ], + "angle": 0, + "content": "Another example of such models is Mesa-layer, in which the model uses \\(\\sum_{i=1}^{t} \\|\\mathcal{M}_{t}(\\mathbf{k}_{i}) - \\mathbf{v}_{i}\\|_{2}^{2}\\) as the attentional bias objective with \\(\\|\\mathcal{M}_{t}\\|_{2}^{2}\\) as the retention regularization. Since these models use Newton's method to optimize such an objective, they provide a more expressive update rule than delta rule. We further discuss a set of new learning algorithms beyond delta rule in Section 5." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.47, + 0.921, + 0.515 + ], + "angle": 0, + "content": "Attention. 
As discussed by Sun et al. (2024), softmax attention is a non-parametric solution of \\(\\ell_2\\)-MSE loss function (i.e., \\(\\| W\\mathbf{k} - \\mathbf{v}\\| _2^2\\)) with Nadaraya-Watson estimator. Therefore, softmax attention is an instance of MIRAS, when we find the non-parametric solution to the MSE loss with Nadaraya-Watson estimator, without retention." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.538, + 0.75, + 0.558 + ], + "angle": 0, + "content": "5 Beyond Existing Attentional Biases and Retention Gates" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.57, + 0.92, + 0.721 + ], + "angle": 0, + "content": "As discussed in the previous section, existing work focuses only on linear/quadratic choices for the attentional bias or retention gate. In particular, the loss function \\( L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t) \\) is defined as \\( L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t) = c_t\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\|^2 \\) for some (learnable) constant \\( c_{t} \\) in prior work. Also the regularization term \\( R_{t}(W) \\) or the parametric \\( D_{t} \\) is considered as a quadratic/linear function. In addition, almost all prior work considers \\( W \\) to be the entire \\( \\mathbb{R}^d \\) space. However, in general there could be various choices for all the three aforementioned design choices. To illustrate the potential and flexibility of our designed framework, here, we review some of the potential design choices for attentional bias and retention gate in MirAS. For the sake of clarity, we discuss all these attentional bias and memory retention gates based on using gradient descent as the optimizer, and so based on the provided two view points. However, these attentional bias objectives and retention regularizers can be directly used in Equation 4 and optimized by using any other optimization algorithms, resulting in different update rules." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.739, + 0.431, + 0.755 + ], + "angle": 0, + "content": "5.1 Alternative Attentional Biases" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.769, + 0.92, + 0.83 + ], + "angle": 0, + "content": "Variant 1: \\(\\ell_p\\)-Attentional Bias. As discussed in the main body, attentional bias defines the \"similarity metric\" and measures how well memory can recall the value, given its corresponding key. Although \\(\\ell_2\\) regression loss often is a natural choice, it is sensitive to noise in the data. A natural extension is to use \\(\\ell_p\\)-norm class of objectives. That is, let \\(\\mathcal{M}\\) be the memory, \\(\\mathbf{k}\\) be the keys, and \\(\\mathbf{v}\\) be the values, we define \\(\\ell_p\\)-attentional bias as:" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.841, + 0.92, + 0.861 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\| \\mathcal {M} \\left(\\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {p} ^ {p}, \\tag {10}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.868, + 0.921, + 0.906 + ], + "angle": 0, + "content": "The retention gate (forget gate) in Titans is different from Mamba2 and Gated DeltaNet that we discussed above. The main difference comes from the case of full memory erase. While Mamba2 gating removes the entire memory and treats the next token as the first ever seen data, Titans use a \"cold start\" strategy and use the previous state of the memory to measure the surprise of the incoming token before fully erasing the memory." + }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.093, + 0.921, + 0.139 + ], + "angle": 0, + "content": "where \\( p \\in \\mathbb{R}^{\\geq 1} \\) and \\( \\| . \\|_p \\) is the \\( p \\)-norm. 
Although depending on the distribution of the data, we might want to use different values of \\( p \\) (see Section 6), different values of \\( p \\) can result in memory architectures with interesting properties. For the sake of simplicity, let memory be a matrix, i.e., \\( W \\in \\mathbb{R}^{m \\times d} \\) and \\( \\mathcal{M}(W, \\mathbf{k}_t) = W\\mathbf{k}_t \\), the closed form can be derived as:" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.149, + 0.921, + 0.169 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = W _ {t - 1} - p \\eta_ {t} \\left(\\operatorname {Sign} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.179, + 0.39, + 0.194 + ], + "angle": 0, + "content": "Let \\(p = 1\\), the recurrence is simplified as:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.205, + 0.921, + 0.222 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\operatorname {Sign} \\left(W _ {t - 1} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top}, \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.234, + 0.919, + 0.264 + ], + "angle": 0, + "content": "which means that the memory has only two values of \\(-1\\) and \\(1\\). We call this variation value-less associative memory, in which we store entities (keys) but map them into two extreme classes of \\(-1\\) and \\(+1\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.272, + 0.921, + 0.319 + ], + "angle": 0, + "content": "Remark 5. One of the critical challenges to use the above update rule is in the backpropagation process, in which \\(\\operatorname{Sign}(\\cdot)\\) and \\(|\\cdot|\\) are non-differentiable and so might cause unstable training. 
To overcome this issue, we use \\(\\operatorname{Sign}(x) \\approx \\tanh(\\alpha x)\\), and \\(|x| = \\sqrt{x^2 + \\epsilon}\\), as the smooth approximators of these functions." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.326, + 0.92, + 0.371 + ], + "angle": 0, + "content": "One simple interpretation for such behavior (i.e., value-less memory) is similar to the coping mechanism in humans (Loftus 1993), in which the memory does not store the values for extreme events. This interpretation of protective memory in extreme events motivates our next variant." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.385, + 0.92, + 0.462 + ], + "angle": 0, + "content": "Variant 2: Huber Loss: Memory with Coping Mechanism. While \\(\\ell_2\\)-norm objective is a common choice for many statistical and machine learning tasks, it is known to be sensitive to outliers and extreme samples. This sensitivity extends to the use of \\(\\ell_2\\) loss for attentional bias. To address this and drawing motivation from robust regression literature, we suggest utilizing the Huber loss-type (Hastie et al. 2009; Huber 1992) as the attentional bias, thereby reducing the negative impact of the outlier data on the memory learning process." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.468, + 0.919, + 0.498 + ], + "angle": 0, + "content": "We can apply Huber-type loss in three different ways: The first approach is to define the summation of the Huber loss across different coordinates as the total loss, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.506, + 0.652, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\sum_ {j} \\mathcal {H} (\\mathcal {M} (W, \\mathbf {k} _ {t}) _ {j} - \\mathbf {v} _ {t, j}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.551, + 0.92, + 0.579 + ], + "angle": 0, + "content": "where \\(\\mathcal{M}(W,\\mathbf{k}_t)_j\\) and \\(\\mathbf{v}_{t,j}\\) denote the \\(j\\)-th coordinate of \\(\\mathcal{M}(W,\\mathbf{k}_t)\\) and \\(\\mathbf{v}_t\\) respectively. The function \\(\\mathcal{H}(\\cdot):\\mathbb{R}\\mapsto \\mathbb{R}\\) is the Huber loss defined as" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.578, + 0.92, + 0.61 + ], + "angle": 0, + "content": "\\[\n\\mathcal {H} (a) = \\left\\{ \\begin{array}{l l} \\frac {1}{2} a ^ {2} & \\text {if } | a | \\leq \\delta \\\\ \\delta \\left(| a | - \\frac {1}{2} \\delta\\right) & \\text {if } | a | > \\delta . \\end{array} \\right. \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.615, + 0.92, + 0.647 + ], + "angle": 0, + "content": "Utilizing this attentional bias can lead to various memory update rules. 
For example, for the matrix form memory \\(\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t\\), the update rule is given by" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.656, + 0.921, + 0.683 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\left[ \\left(\\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| \\leq \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) + \\left(\\delta_ {t} \\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} ^ {\\top}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| > \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) \\right] \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.693, + 0.92, + 0.723 + ], + "angle": 0, + "content": "In this formulation, the parameter \\(\\delta_t\\) decides the type of the memory used for each block of memory (\\(\\ell_2\\)-norm objective or value-less) based on the context, making the memory more robust to outliers." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.731, + 0.771, + 0.746 + ], + "angle": 0, + "content": "The second approach is to define the Huber-type loss based on the \\(\\ell_2\\) loss over all coordinates, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.758, + 0.644, + 0.774 + ], + "angle": 0, + "content": "\\[\n\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\mathcal {H} (\\| \\mathcal {M} (W, \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.786, + 0.92, + 0.817 + ], + "angle": 0, + "content": "For simplicity of derivations, assume matrix memory \\( M(W,\\mathbf{k}_t) = W\\mathbf{k}_t \\). 
Then using gradient descent for updating memory leads the memory update rule" + }, + { + "type": "equation", + "bbox": [ + 0.27, + 0.827, + 0.921, + 0.868 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta_ {t} \\left\\{ \\begin{array}{l l} \\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T} & \\text {i f} \\| \\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} \\leq \\delta_ {t}, \\\\ \\delta_ {t} \\frac {\\left(\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right)}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {T} & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.878, + 0.92, + 0.909 + ], + "angle": 0, + "content": "Again, in the form (15), the parameter \\(\\delta_t\\) decides the type of the memory used (\\(\\ell_2\\)-norm objective or normalized version) based on the context, making the memory more robust to outliers." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.512, + 0.938, + 0.522, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.093, + 0.92, + 0.123 + ], + "angle": 0, + "content": "Finally, in the third approach, we present a smooth mixture method, in which the memory decides if for an incoming data it is better to use \\(\\ell_2\\) or \\(\\ell_1\\) attentional bias:" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.131, + 0.92, + 0.171 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.179, + 0.423, + 0.194 + ], + "angle": 0, + "content": "The role of parameter \\(\\delta_t\\) is the same as above." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.207, + 0.921, + 0.268 + ], + "angle": 0, + "content": "Variant 3: Memory Robust to Value Shifts. Following the robustness requirement discussed in the previous section, we aim to design a memory mechanism that exhibits resilience against small shifts in the value parameter. A natural approach in this context is to employ a robust optimization formulation. 
Specifically, we define the loss function as the worst-case \\(\\ell_2\\) distance between the predicted memory output and the perturbed true value:" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.276, + 0.92, + 0.307 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\max _ {\\| \\delta \\mathbf {v} _ {t} \\| _ {2} \\leq \\Delta} \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\left(\\mathbf {v} _ {t} + \\boldsymbol {\\delta} \\mathbf {v} _ {t}\\right) \\| _ {2} ^ {2}. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.314, + 0.92, + 0.359 + ], + "angle": 0, + "content": "This formulation seeks the memory parameters \\( W \\) that perform well even under the adverse local perturbation of the true value \\( \\mathbf{v}_t \\) within an \\( \\ell_2 \\) ball of radius \\( \\Delta \\). To solve the maximization problem in (17), we find the optimal perturbation \\( \\delta \\mathbf{v}_t^* \\). 
By solving this problem with respect to \\( \\delta \\mathbf{v}_t \\), we arrive at:" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.366, + 0.609, + 0.4 + ], + "angle": 0, + "content": "\\[\n\\delta \\mathbf {v} _ {t} ^ {*} = \\Delta \\frac {- \\mathcal {M} (W , \\mathbf {k} _ {t}) + \\mathbf {v} _ {t}}{\\| \\mathcal {M} (W , \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.406, + 0.757, + 0.421 + ], + "angle": 0, + "content": "Substituting this optimal perturbation back into the loss function (17), we obtain the robust loss:" + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.429, + 0.75, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} ^ {2} + \\Delta \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} + \\frac {1}{2} \\Delta^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.464, + 0.92, + 0.51 + ], + "angle": 0, + "content": "This robust loss function is a combination of the standard \\(\\ell_2\\) loss and a term proportional to the \\(\\ell_2\\) norm of the error, scaled by the robustness parameter \\(\\Delta\\). The value of \\(\\Delta\\) thus controls the trade-off between fitting the nominal data and ensuring robustness against value perturbations." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.517, + 0.92, + 0.577 + ], + "angle": 0, + "content": "For simplicity of the derivations, let us consider a constant value for \\(\\Delta\\), an Euclidean retention gate \\(\\mathrm{Ret}_t(W,W_{t - 1}) = \\| W - W_{t - 1}\\|^2\\), and an attentional bias term \\(\\widetilde{\\ell} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle\\). 
Furthermore, to simplify the memory operation, we assume a linear matrix memory model \\(\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t\\). Under these assumptions, we can derive the memory update mechanism using gradient descent on the robust loss:" + }, + { + "type": "equation", + "bbox": [ + 0.289, + 0.595, + 0.741, + 0.629 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} - \\eta \\left(\\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} + \\Delta \\frac {\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {\\top}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.632, + 0.919, + 0.663 + ], + "angle": 0, + "content": "In this update rule, the parameter \\(\\Delta\\), which governs the influence of the robustness term, can also be treated as a learnable parameter, allowing the model to adapt its robustness based on the observed data." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.68, + 0.41, + 0.696 + ], + "angle": 0, + "content": "5.2 Alternative Retention Gates" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.711, + 0.92, + 0.77 + ], + "angle": 0, + "content": "Variant 1: Memorization Over A Scaled Probability Simplex Via \\( f \\)-Divergence. A common technique in learning to prevent numerical instabilities and exploding values is to restrict the search space to a bounded domain. Following this principle, to avoid numerical instabilities, we can constrain the variable \\( W_{t} \\) to lie within a (scaled) probability simplex. 
In other words, we can restrict the state to lie in the constraint set" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.78, + 0.658, + 0.797 + ], + "angle": 0, + "content": "\\[\n\\mathcal {W} = \\{W \\mid \\| W \\| _ {1} = c \\text {a n d} W _ {j l} \\geq 0, \\forall j, l \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.806, + 0.922, + 0.867 + ], + "angle": 0, + "content": "In this set, each matrix \\( W \\) can be viewed as a measure. Thus, in (Learning-Retaining Viewpoint), we can utilize divergences over measures to define our premetric. For example, we can use \\( f \\)-divergence measure (Polyanskiy et al. 2025, Def 4.9), (Csiszar 1967) to define \\( \\mathrm{D}_t(\\cdot, \\cdot) \\). More specifically, let \\( f(\\cdot) \\) be a smooth strictly convex function from \\( \\mathbb{R}^+ \\) to \\( \\mathbb{R} \\) with \\( f(1) = 0 \\). Then, we can define the \\( f \\)-divergence between \\( W \\) and \\( W' \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.874, + 0.619, + 0.915 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D} _ {t} (W, W ^ {\\prime}) = \\sum_ {j l} W _ {j l} ^ {\\prime} f \\left(\\frac {W _ {j l}}{W _ {j l} ^ {\\prime}}\\right).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.093, + 0.921, + 0.137 + ], + "angle": 0, + "content": "It is known that \\( f \\)-divergence is zero if and only if \\( W = W' \\); see Polyanskiy et al. 2025, Theorem 2.3. 
Using the above premetric as the retention gate and setting \\( \\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle \\) in (Learning-Retaining Viewpoint), we get the update rule" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.139, + 0.92, + 0.154 + ], + "angle": 0, + "content": "\\[\nW _ {t} = W _ {t - 1} \\odot g \\left(- \\zeta_ {t} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right). \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.161, + 0.92, + 0.267 + ], + "angle": 0, + "content": "Here \\( g(\\cdot) \\) is the inverse of the mapping \\( f' \\), i.e., \\( g(f'(\\tau)) = \\tau \\), \\( \\forall \\tau \\); the operator \\( \\odot \\) denotes the Hadamard (elementwise) product, and \\( \\zeta_t \\) should be chosen such that \\( \\| W_t\\|_1 = c \\). Notice that since the function \\( f(\\cdot) \\) is strictly convex and smooth, its derivative is strictly increasing and hence \\( g(\\cdot) \\) is well defined. Conversely, for any strictly monotone function \\( g(\\cdot) \\), we can find its inverse function \\( g^{-1} \\) (which is strictly increasing) and define \\( f(\\tau) = \\mathrm{const} + \\int_{\\tau' = 0}^{\\infty}g^{-1}(\\tau')d\\tau' \\). The term const should be chosen such that \\( f(1) = 0 \\). Then the update rule in (18) can be interpreted by the \\( f \\)-divergence regularization, as explained above. Therefore, one can directly choose a continuous monotonically increasing function \\( g(\\cdot) \\) and use (18) for memory update." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.281, + 0.921, + 0.363 + ], + "angle": 0, + "content": "Specializing to KL divergence. Let us further make the above update rule explicit by using special function \\( f \\). 
If we choose \\( f(\\tau) = \\tau \\ln(\\tau) \\), then the \\( f \\)-divergence becomes the widely used KL divergence measure \\( D_t(W, W_{t-1}) = \\sum_{jl} W_{jl} \\log \\left( \\frac{W_{jl}}{(W_t)_{jl}} \\right) \\). In addition, we can also utilize the Shannon entropy as the global retention by regularizing deviations from uniform distribution, i.e., \\( G_t(W) = \\sum_{jl} W_{jl} \\log (W_{jl}) \\). Combining these choices of the local and global retention gates, we obtain the overall retention gate" + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.363, + 0.724, + 0.401 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e t} _ {t} (W, W _ {t - 1}) = \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t}\\right) _ {j l}}\\right) + \\frac {1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.408, + 0.919, + 0.44 + ], + "angle": 0, + "content": "Choosing the attentional bias \\(\\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle\\) and the above retention gate will lead to the update rule" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.45, + 0.92, + 0.488 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) \\right\\rangle + \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t}\\right) _ {j l}}\\right) + \\frac {1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right) \\tag {19}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.491, + 0.92, + 0.525 + ], + "angle": 0, + "content": "\\[\n\\text {s . 
t .} \\quad \\sum_ {j l} W _ {j l} = c, W _ {j l} \\geq 0, \\forall j l \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.536, + 0.68, + 0.551 + ], + "angle": 0, + "content": "Attaching the Lagrange multiplier to the first constraint, the KKT conditions imply" + }, + { + "type": "equation", + "bbox": [ + 0.247, + 0.562, + 0.787, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\left(\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) _ {j l} + \\left(\\frac {1}{\\eta_ {t}} + \\frac {1}{\\alpha_ {t}}\\right) \\left(1 + \\log W _ {j l}\\right) - \\frac {1}{\\eta_ {t}} \\log \\left(\\left(W _ {t - 1}\\right) _ {j l}\\right) + \\mu_ {t} = 0, \\quad \\forall j, l\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.608, + 0.919, + 0.64 + ], + "angle": 0, + "content": "where \\(\\mu_t\\) should be chosen such that \\(\\sum_{jl} W_{jl} = c\\). Rearranging the terms and defining \\(\\lambda_t = \\frac{1 / \\alpha_t}{1 / \\alpha_t + 1 / \\eta_t}\\), \\(\\eta_t' = \\frac{1}{1 / \\alpha_t + 1 / \\eta_t}\\), we get the update rule" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.641, + 0.92, + 0.657 + ], + "angle": 0, + "content": "\\[\nW _ {t} \\leftarrow c \\operatorname {S o f t m a x} \\left(\\left(1 - \\lambda_ {t}\\right) \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} ^ {\\prime} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.664, + 0.92, + 0.694 + ], + "angle": 0, + "content": "where \\(\\lambda_t \\in (0,1)\\) and \\(\\eta' \\in \\mathbb{R}^+\\) are the parameters that can be learned during training. The Softmax operator ensures that the output lies in the set \\(\\mathcal{W}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.702, + 0.919, + 0.732 + ], + "angle": 0, + "content": "Notice that while all above calculations are done for a matrix \\( W \\), similar update rule holds for other forms of parameters such as when \\( W \\) is a neural network (or when the parameter \\( W \\) is normalized per slice)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.745, + 0.92, + 0.821 + ], + "angle": 0, + "content": "Variant 2: Elastic Net Regularization: Hard and Soft Forgetting. Elastic net is a powerful and popular tool in regression analysis to balance the feature selection capabilities of LASSO (Tibshirani 1996) and bias reduction properties of Ridge regression (Hilt et al. 1977; Hoerl et al. 1970). It has been widely used in different applications due to its ability to handle high-dimensional data and mitigate the effects of multicollinearity. Given this success, a natural question is what happens if we use this regularization scheme in our context." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.829, + 0.933, + 0.874 + ], + "angle": 0, + "content": "Let us start based on (Learning-Retaining Viewpoint) to design our memorization scheme. In (Learning-Retaining Viewpoint), we discussed that the loss function \\(\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)\\) is an approximation of the original function \\(\\ell (\\cdot)\\), measuring our goodness-of-fit. 
Regularizing this loss with elastic net regularizer, we obtain the approximation" + }, + { + "type": "equation", + "bbox": [ + 0.366, + 0.884, + 0.666, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\widetilde {\\ell} _ {t} (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\rangle .\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.525, + 0.949 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.921, + 0.137 + ], + "angle": 0, + "content": "with a global retention of \\( \\mathrm{G}_t(W) = \\frac{1}{2\\beta} \\| W\\| _2^2 +\\frac{1}{\\alpha}\\| W\\| _1 \\). To fully specify the update rule of (Learning-Retaining Viewpoint), we also need to specify the premetric functions \\( \\mathrm{D}_t(\\cdot ,\\cdot) \\). For the sake of keeping the update rule simple (and parallelizable), we can choose" + }, + { + "type": "equation", + "bbox": [ + 0.41, + 0.136, + 0.622, + 0.164 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D} _ {t} (W, W _ {t - 1}) = \\frac {1}{2} \\| W - W _ {t - 1} \\| _ {2} ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.168, + 0.717, + 0.184 + ], + "angle": 0, + "content": "These choices of the attentional bias and retention gate lead to the following update rule:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.206, + 0.92, + 0.222 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\mathcal {S} _ {\\gamma} \\left(\\lambda W _ {t - 1} - \\zeta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right), \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.228, + 0.919, + 0.262 + ], + "angle": 0, + "content": "where \\(\\gamma = \\frac{\\eta\\beta}{\\alpha(\\eta + \\beta)}\\), \\(\\lambda = \\frac{\\beta}{\\beta + \\eta}\\), \\(\\zeta = \\eta\\lambda\\), and \\(S_{\\gamma}\\) is the soft thresholding operator, applied 
element-wise. For each element, this operator is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.263, + 0.626, + 0.28 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} _ {\\gamma} (z) = \\operatorname {s i g n} (z) \\max \\left\\{0, | z | - \\gamma \\right\\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.285, + 0.921, + 0.317 + ], + "angle": 0, + "content": "In other words, for large values of \\( z \\), \\( S_{\\gamma}(z) \\) makes \\( z \\) closer to zero by \\( \\gamma \\) amount. If it is already in the \\( \\gamma \\)-vicinity of zero, then it makes it zero (hard forget)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.323, + 0.92, + 0.391 + ], + "angle": 0, + "content": "Equation (22) can be viewed as a combination of soft forgetting (obtained by multiplying \\( W \\) by \\( \\lambda \\in (0,1) \\), and a hard forgetting (if it is smaller than \\( \\gamma \\)). The hyperparameters \\( \\gamma, \\lambda, \\) and \\( \\zeta \\) can be learned. Notice that since the shrinkage operator is not differentiable, we can approximate it with its smooth approximation. For example, we can use \\( S_{\\gamma}(z) \\approx \\frac{|z|*\\arctan(z / \\gamma)}{\\pi / 2} \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.402, + 0.92, + 0.432 + ], + "angle": 0, + "content": "Variant 3: Elastic Net Regularization: Forgetting via Soft-thresholding. The elastic net regularizer can also be used in the (FTRL Viewpoint). In particular, in (FTRL Viewpoint), we can set" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.442, + 0.617, + 0.474 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{\\eta_ {t}} R _ {t} (W) = \\frac {1}{\\eta} \\| W \\| ^ {2} + \\frac {1}{\\alpha} \\| W \\| _ {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.485, + 0.92, + 0.517 + ], + "angle": 0, + "content": "and use \\(\\widehat{\\ell}(W; x_i) = \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; x_i) \\rangle\\). 
Assuming initialization at \\(W_0 = 0\\), these choices of attentional bias and retention gate lead to the update rules:" + }, + { + "type": "equation", + "bbox": [ + 0.414, + 0.529, + 0.616, + 0.544 + ], + "angle": 0, + "content": "\\[\nA _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.547, + 0.918, + 0.565 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\mathcal {S} _ {\\eta / \\alpha} (A _ {t}) \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.575, + 0.871, + 0.591 + ], + "angle": 0, + "content": "Here \\(S_{\\eta /\\alpha}(\\cdot)\\) is the soft-thresholding operator with parameter \\(\\eta /\\alpha\\), which can be smoothly approximated as explained in Variant 1.1." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.603, + 0.919, + 0.651 + ], + "angle": 0, + "content": "Variant 4: General \\( L_{q} \\) Memory Stability. Existing work is based on the retention gate choices \\( \\mathrm{D}_t(W, W_{t-1}) = \\|W - W_{t-1}\\|_F^2 \\) or \\( R(W) = \\|W\\|_2^2 \\). However, one can choose other choices of retention gate. For example, in (FTRL Viewpoint), we can choose \\( L_{q} \\) norm as the regularizer \\( R(W) \\). 
More specifically, for \\( 1 < q \\leq 2 \\), we can set" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.66, + 0.609, + 0.692 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{\\eta_ {t}} R (W) = \\frac {1}{2 \\eta (q - 1)} \\| W \\| _ {q} ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.703, + 0.92, + 0.772 + ], + "angle": 0, + "content": "Using this retention gate and choosing \\(\\widehat{\\ell_i} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{i - 1},\\nabla \\ell (W_{i - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle\\) in (FTRL Viewpoint), leads to the update rule \\(W_{t} = -\\eta \\frac{A_{t}}{\\|A_{t}\\|_{p}^{p - 2}}\\), where \\(p = \\frac{q}{q - 1}\\) and \\(A_{t} = \\sum_{i = 1}^{t}\\nabla \\ell (W_{i - 1};\\mathbf{k}_{t},\\mathbf{v}_{t})\\); see Shalev-Shwartz et al. 2012, Section 2.6. Here, \\(\\odot\\) denotes the Hadamard (element-wise) product and \\(|\\cdot |\\) is the element-wise absolute value operator. Assuming \\(W_0 = 0\\), this update rule can be recursively written as:" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.781, + 0.698, + 0.817 + ], + "angle": 0, + "content": "\\[\nA _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {p} ^ {p - 2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.841, + 0.92, + 0.887 + ], + "angle": 0, + "content": "Variant 5: Bregman Divergence as Retention Gate.. Another natural choice is to use Bregman divergence as retention gate, leading to a mirror descent-type algorithms. In particular, given a smooth strictly convex function \\( f(\\cdot): \\mathbb{R} \\mapsto \\mathbb{R} \\), we can define the function \\( F(W) = \\sum_{jl} f(W_{jl}) \\). 
Based on this choice of function \\( F \\), we define the Bregman divergence" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.898, + 0.667, + 0.914 + ], + "angle": 0, + "content": "\\[\nD _ {t} (W, W ^ {\\prime}) = F (W) - F \\left(W ^ {\\prime}\\right) - \\langle W ^ {\\prime}, W - W ^ {\\prime} \\rangle\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.92, + 0.123 + ], + "angle": 0, + "content": "as our parametric function. Utilizing this retention gate and choosing \\(\\widetilde{\\ell}_t(W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle\\) in (Learning-Retaining Viewpoint), we obtain the update rule" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.133, + 0.652, + 0.15 + ], + "angle": 0, + "content": "\\[\nW _ {t} = g \\left(- \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) + F ^ {\\prime} \\left(W _ {t - 1}\\right)\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.159, + 0.92, + 0.19 + ], + "angle": 0, + "content": "Here, \\( F' \\) is the mapping obtained by applying \\( f'(\\cdot) \\) (the derivative of \\( f \\)) element-wise to all entries of its input matrix argument. The function \\( g \\) is the inverse of the mapping \\( F'(\\cdot) \\), i.e., \\( g(F'(W)) = W \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.198, + 0.922, + 0.308 + ], + "angle": 0, + "content": "If we choose \\( f(\\tau) = \\frac{\\tau^2}{2} \\), then \\( F'(W) \\) becomes the identity mapping and so is \\( g \\). Therefore, the above update becomes simple gradient descent with no nonlinearity involved in the update rule. However, other choices of \\( f(\\cdot) \\) introduces additional nonlinearity in \\( g(\\cdot) \\), which can enhance the expressivity of our memory. 
For example, we can choose the function \\( f(\\cdot) \\) so that its derivative becomes the inverse sigmoid function, i.e., \\( f'(\\tau) = \\ln \\left( \\frac{\\tau}{1 - \\tau} \\right) \\) with \\( f': (0,1) \\mapsto \\mathbb{R} \\). Since \\( f'(\\cdot) \\) is strictly increasing, then the function \\( f(\\cdot) \\) (and hence \\( F(\\cdot) \\)) is strictly convex. Therefore, the Bregman divergence is well defined. Moreover, the inverse of the function \\( f'(\\cdot) \\) becomes the sigmoid function, i.e., \\( g(\\tau) = \\sigma(\\tau) = \\frac{\\exp(\\tau)}{1 + \\exp(\\tau)} \\) with \\( g: \\mathbb{R} \\mapsto (0,1) \\). Then, the update of the memory becomes" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.317, + 0.66, + 0.352 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\sigma \\left(\\ln \\left(\\frac {W _ {t}}{1 - W _ {t}}\\right) - \\eta \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t})\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.361, + 0.92, + 0.392 + ], + "angle": 0, + "content": "where \\(\\sigma\\) is the sigmoid function operated element-wise on the entries of \\(W\\), and the division operator \\(\\frac{W_t}{1 - W_t}\\) is also performed element-wise. This update rule guarantees that the elements of \\(W_t\\) remain within the interval \\((0, 1)\\)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.41, + 0.58, + 0.427 + ], + "angle": 0, + "content": "5.3 MIRAs's Variants: MONETA, YAAD, and MEMORA" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.435, + 0.92, + 0.482 + ], + "angle": 0, + "content": "In the previous section we discussed different potential choices for attentional bias and retention gate to show the generality and the potential of MIRAs. In this section, building upon our framework, we present three novel sequence models, each of which designed based on a different motivation, and discuss how they can leverage fast parallel training." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.493, + 0.92, + 0.587 + ], + "angle": 0, + "content": "MOnETA. Given \\(p,q\\in \\mathbb{R}^{\\geq 1}\\), we design \\((p,q)\\)-MONETA as the variant of MIRAs as follows: (1) For the choice of memory architecture, we use an MLP with 2 layers with expansion factor of 4 and GELU activation function (Hendrycks et al. 2016). We also use residual connections and layer norm, resulting in \\(\\mathcal{M}(x) = x + \\mathsf{LN}(W_1\\sigma (W_2x))\\). (2) We choose \\(\\ell_p\\)-attentional bias (introduced in Equation 11) for MONETA. (3) For the choice of retention gate, we use the hybrid of \\(\\ell_q\\) retention gate \\(\\frac{1}{2(q - 1)}\\| W\\| _q^2\\) (see Section 5.2 for details) and the standard \\(\\ell_2\\) regularization \\(\\frac{1}{\\beta}\\| W\\| _2^2\\). (4) Finally, we use gradient descent as the memory learning algorithm. The above choices, result in the following recurrent formula for the memory module:" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.594, + 0.92, + 0.631 + ], + "angle": 0, + "content": "\\[\nA _ {t} = \\alpha_ {t} A _ {t - 1} - \\eta_ {t} \\nabla \\ell_ {p} \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {q} ^ {q - 2}}. \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.639, + 0.421, + 0.654 + ], + "angle": 0, + "content": "Notably the gradient can be calculated using:" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.663, + 0.92, + 0.681 + ], + "angle": 0, + "content": "\\[\n\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\eta_ {t} \\left(\\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. 
\\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.69, + 0.263, + 0.707 + ], + "angle": 0, + "content": "We use \\((p,q) = (3,4)\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.718, + 0.92, + 0.826 + ], + "angle": 0, + "content": "YAAD. Building upon our discussion on the importance of robust memory that protects itself from extreme events (tokens), we design YAAD based on Huber objective. That is, in MirAS, for the choice of memory structure, we follow MONETA and use an MLP with the same architecture as above; for the choice of attentional bias, we use Huber loss (defined in Equation 16); for the choice retention gate, for the sake of simplicity, we use a combination of local and global retention as \\(\\mathrm{Ret}_t(W,W_{t - 1}) = \\frac{1}{2\\theta_t}\\| W - W_{t - 1}\\| _F^2 +\\frac{1}{\\beta_t}\\| W\\| _2^2\\) , which is equivalent to the \"forget gate\" mechanism introduced by Behrouz et al. (2024c); and finally, we simply use gradient descent as the memory learning algorithm. Given the above choices, we can write the resulted memory learning process as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.835, + 0.92, + 0.876 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\alpha_ {t} W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. 
\\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.883, + 0.92, + 0.915 + ], + "angle": 0, + "content": "Note that for improving the expressive power, in all architectures, we decouple the learning rate \\(\\eta\\) and the retention gate rate \\(\\alpha\\), resulting in an independent parameter \\(\\beta_{t} \\in [0,1]^{d}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.09, + 0.345, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.089, + 0.532, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.09, + 0.852, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.366, + 0.92, + 0.398 + ], + "angle": 0, + "content": "Figure 2: Visualization of the MirAs's variant architecture, their hybrid counterpart with SWA, and block design of MirAs layer." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.424, + 0.92, + 0.5 + ], + "angle": 0, + "content": "MEMORA. Finally, in MEMORA, we use the idea of elastic net regularization (i.e., hard and soft retention). To this end, in Miras: (1) For the choice of memory architecture, similar to above variants, we use an MLP (the same architecture as the previous variants). (2) For the choice of attentional bias, we use simple \\(\\ell_2\\) regression loss. (3) For the choice of retention gate we use KL divergence as in Equation 21. 
(4) Finally, we optimize the memory using gradient descent, resulting in the following update rule:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.534, + 0.92, + 0.551 + ], + "angle": 0, + "content": "\\[\nW _ {t} = \\operatorname {Softmax} \\left(\\alpha_ {t} \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {27}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.581, + 0.529, + 0.599 + ], + "angle": 0, + "content": "5.4 Architecture Backbone and Fast Training" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.611, + 0.92, + 0.718 + ], + "angle": 0, + "content": "Architectural Backbone. For the architectural backbone, we fully follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a): We replace attention modules with our variants of MIRAs in Llama's macro architecture with MLPs with SwiGLU(·) activation, rotary positional encodings (RoPE) (Su et al. 2024), and RMSNorm (Zhang et al. 2019). For MIRAs layer block, we follow the recent modern linear recurrent models (Behrouz et al. 2024c; Yang et al. 2024a), and incorporate a 1D depthwise-separable convolution layer (with kernel size of 4) after each of the query, key, and value projections. For the sake of training stability, we also apply \\(\\ell_2\\) normalization to \\(\\mathbf{q}\\) and \\(\\mathbf{k}\\). The output of MIRAs layer block is normalized and gated with a linear layer (Mehta et al. 2023)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.731, + 0.92, + 0.792 + ], + "angle": 0, + "content": "Channel-wise Parameters. For learnable parameters of \\(\\eta_t, \\delta_t\\) and the retention gate of \\(\\alpha_t\\) we use channel-wise parametrization, i.e., \\(\\eta_t, \\delta_t, \\alpha_t \\in \\mathbb{R}^d\\). While gaining more expressive power, this parametrization results in a significant parameter increase. To mitigate this issue, following Peng et al. 
(2025b), we use low-rank projections to project the input into \\(\\mathbb{R}^k\\) and then to \\(\\mathbb{R}^d\\), where \\(k\\) is a hyperparameter (usually 32 or 64). The backbone architecture is illustrated in Figure 2." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.805, + 0.919, + 0.851 + ], + "angle": 0, + "content": "Hybrid Models. We also evaluate the hybrid version of Miras's variants. For hybrid models, we follow the Samba (Ren et al. 2024) architecture, in which we sequentially combine our Miras layer with Sliding Window Attention (SWA). The illustration of the hybrid model is in Figure 2." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.863, + 0.92, + 0.91 + ], + "angle": 0, + "content": "Parallelizable Training. While the designs of Miras's variants are theoretically well-motivated, their recurrence is non-linear, potentially making their straightforward training slow for large scales. In this section, we build upon the work of Behrouz et al. (2024c) and Sun et al. (2024) to make the training parallelizable. The main idea is to divide the sequence into" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.093, + 0.92, + 0.138 + ], + "angle": 0, + "content": "chunks with size \\( b \\) (usually 16 or 64) and calculate the gradient for all tokens in the current chunk with respect to the last state of the memory in the previous chunk. That is, we use \\( \\nabla \\ell(\\mathcal{M}_{t'}; \\mathbf{k}_t, \\mathbf{v}_t) \\) instead of \\( \\nabla \\ell(\\mathcal{M}_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\), where \\( t' \\) is the index of the last state in the previous chunk." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.145, + 0.92, + 0.207 + ], + "angle": 0, + "content": "Given the above trick, we can calculate all gradients at once and make the recurrence inside each chunk linear. 
However, to fully take advantage of accelerators, we need to reformulate the process as matrix multiplication. For MONETA, for the sake of clarity, assume \\( q = 2 \\). We follow the same algorithm as Behrouz et al. (2024c) and expand the recurrence as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.219, + 0.92, + 0.276 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {M} _ {t} = \\alpha_ {t} \\mathcal {M} _ {t - 1} - \\eta_ {t} \\nabla \\ell (\\mathcal {M} _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\\\ = \\beta_ {t} \\mathcal {M} _ {0} - \\sum_ {i = 1} ^ {t} \\eta_ {i} \\frac {\\beta_ {t}}{\\beta_ {i}} \\nabla \\ell \\left(\\mathcal {M} _ {t ^ {\\prime}}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right), \\tag {28} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.288, + 0.92, + 0.334 + ], + "angle": 0, + "content": "where \\(t' = t - \\mathrm{mod}(t, b)\\), and \\(\\beta_{i} = \\prod_{j=1}^{i} \\alpha_{j}\\). For the sake of clarity, we focus on the first chunk, i.e., \\(t = b\\) and so \\(t' = 0\\), and explain the process for the case that \\(\\mathcal{M}_t = W_t\\) is linear. The process for 2-layer MLPs and other chunks is similar. 
Using the \\(\\ell_p\\) loss function, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.345, + 0.92, + 0.405 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla \\ell \\left(W _ {0}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\left(\\operatorname {Sign} \\left(W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot \\left| W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top} \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {0}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = p \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\operatorname {Sign} \\left(W _ {0} \\mathbf {K} - \\mathbf {V}\\right) \\odot \\left(\\left| W _ {0} \\mathbf {K} - \\mathbf {V} \\right| ^ {p - 1}\\right) \\mathbf {K} ^ {\\top}, \\tag {29} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.418, + 0.92, + 0.485 + ], + "angle": 0, + "content": "where \\(\\mathbf{E}_b = \\left[\\eta_1\\quad \\eta_2\\quad \\dots \\quad \\eta_b\\right]\\) and \\(\\mathbf{B}_b\\) is defined analogously on \\(\\frac{\\beta_b}{\\beta_i}\\mathrm{s}\\). For the sake of stability in training, we use \\(\\operatorname{Sign}(x)\\approx \\tanh (\\alpha x)\\) and \\(|x| = \\sqrt{x^2 + \\epsilon}\\), where \\(\\epsilon >0\\) is a small number (i.e., \\(\\epsilon = 10^{-6}\\)). As discussed in Equation 24, the case that \\(q\\neq 2\\) appears as a normalization term on the memory. Similar to Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024), we do not apply this non-linearity inside each chunk and instead use it at the end of each chunk." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.492, + 0.919, + 0.524 + ], + "angle": 0, + "content": "For YAAD, the process is very similar to the above. 
We calculate the gradient of both \\(\\ell_1\\) and \\(\\ell_2\\) losses and use a masking based on \\(\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\| \\leq \\delta_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.53, + 0.92, + 0.605 + ], + "angle": 0, + "content": "For MEMORA, the update rule has two non-linear parts, i.e., softmax and log, making the model hardly parallelizable. To this end, as discussed above, we use its linear version inside each chunk and its non-linear version across chunks. However, using both log and softmax at the end of each chunk removes the effect of log. To this end, we consider lag tokens after each chunk (i.e., tokens with index \\( i = kb + 1 \\), where \\( b \\) is the chunk size and \\( k \\in \\mathbb{Z}^+ \\)). That is, letting \\( \\mathcal{M}_0 \\) be the last state of the memory in the previous chunk, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.352, + 0.618, + 0.92, + 0.633 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} _ {1} = \\operatorname {Softmax} \\left(\\alpha_ {1} \\log \\left(\\mathcal {M} _ {0}\\right) - \\eta_ {1} \\nabla \\ell_ {2} \\left(\\mathcal {M} _ {0}; \\mathbf {k} _ {1}, \\mathbf {v} _ {1}\\right)\\right), \\tag {30}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.644, + 0.871, + 0.66 + ], + "angle": 0, + "content": "and then we use \\(\\mathcal{M}_1\\) for the next chunk. 
Again, for the sake of clarity, assume that memory is linear, i.e., \\(\\mathcal{M}_1 = W_1\\):" + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.671, + 0.92, + 0.73 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla \\ell \\left(W _ {1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = \\left(W _ {1} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} \\tag {31} \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {1}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\left(W _ {1} \\mathbf {K} - \\mathbf {V}\\right) \\mathbf {K} ^ {\\top}, \\tag {32} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.741, + 0.488, + 0.756 + ], + "angle": 0, + "content": "where matrices are defined the same as for Equation 29." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.779, + 0.291, + 0.799 + ], + "angle": 0, + "content": "6 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.809, + 0.919, + 0.87 + ], + "angle": 0, + "content": "In our experimental evaluations, we aim to answer three main questions: (1) Do different attentional biases result in different architectures in practice? (2) How do different types of retention gates (i.e., retention gate) affect the performance of the model in long context? (3) How do MEMORA, MONETA, and YAAD perform in downstream tasks compared to baselines?" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.883, + 0.92, + 0.914 + ], + "angle": 0, + "content": "Setup. We train our models with a training context window of size 4096 using either the FineWeb-Edu dataset (Penedo et al. 2024) (for LM and common-sense reasoning tasks) or the C4 dataset (Raffel et al. 2020) (for scaling patterns). 
We use model" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.092, + 0.378, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.092, + 0.647, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.092, + 0.918, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.228, + 0.921, + 0.259 + ], + "angle": 0, + "content": "Figure 3: Scaling patterns when increasing (Left) model size, (Middle) sequence length (model size = 340M) (3) (Right) sequence length (model size = 760M) on C4 dataset." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.286, + 0.921, + 0.332 + ], + "angle": 0, + "content": "sizes of 120M, 340M, 760M, and 1.3B parameters. We train small models (120M and 340M) on 15B tokens sampled from the dataset, the medium size model (760M) on 30B tokens, and the large model on 100B tokens. Baseline results are reported by Behrouz et al. (2024c)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.35, + 0.617, + 0.368 + ], + "angle": 0, + "content": "6.1 Language Modeling and Common-sense Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.374, + 0.922, + 0.48 + ], + "angle": 0, + "content": "We follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a,c) and first focus on the perplexity in language modeling and also commonsense reasoning tasks. The results for MEMORA, YAAD, MONETA and also baselines with size of 340M, 760, and 1.3B are reported in Table 2. All of our variants outperforms all the baselines including Transformer++, modern linear recurrent models and hybrid methods. The superior performance compared to hybrid models is particularly important as all of our variants are pure recurrent (attention-free). 
Among the three variants of MirAS, while MONETA achieves slightly weaker performance than MEMORA and YAAD, the other two variants are close, and depending on the task and model size, the best model can vary." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.498, + 0.296, + 0.515 + ], + "angle": 0, + "content": "6.2 Scaling Pattern" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.523, + 0.919, + 0.555 + ], + "angle": 0, + "content": "To evaluate the scaling pattern of models and for comparing them with baselines, in this section, we plot their performance with varying the model size and the context window." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.567, + 0.92, + 0.675 + ], + "angle": 0, + "content": "Context Length. We first vary the training context length from 2K to 32K for two versions of our model with size 340M and 760M. The results are reported in Figure 3 (Middle and Right). All three variants of Miras scale better than state-of-the-art baselines when increasing the context length. We attribute this superior performance to: (1) expressive memory architecture. Contrary to baselines like Mamba2 and GSA that use vector- and matrix-valued memory, our variants are using 2-layer MLPs with more expressive power to learn from longer sequences. (2) The choice of retention gate and attentional bias: All of our three variants go beyond the standard attentional biases and retention gates. These choices can help the memory to better manage its fixed-size capacity." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.686, + 0.92, + 0.735 + ], + "angle": 0, + "content": "Model Size. We also report the #FLOPs vs. perplexity of our models and baselines in Figure 3 (Left). All three variants outperform all baselines given almost the same budget of FLOPs. These results, once again, support the importance of powerful memory design." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.75, + 0.336, + 0.768 + ], + "angle": 0, + "content": "6.3 Needle In Haystack" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.775, + 0.922, + 0.88 + ], + "angle": 0, + "content": "To evaluate the effective context window of our models and baselines, we use needle-in-haystack task. In this task, we evaluate the model on retrieving a piece of information (i.e., the \"needle\") from long distractor texts (i.e., the \"haystack\"). We focus on the Single NIAH (S-NIAH) task from RULER benchmark (Hsieh et al. 2024) and evaluate our models and baselines on sequences with length 1K, 2K, 4K, and 8K. The results are reported in Table 3. All our variants outperforms all the baselines with a considerable margin. Interestingly, MONETA shows better performance than others when the data is synthetic noise (S-NIAH-PK). This observation validates the effectiveness of \\( p \\)-norm objective and retention gates as they are more robust to noise." + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.091, + 0.922, + 0.144 + ], + "angle": 0, + "content": "Table 2: Performance of MIRAS's variants and baselines on language modeling and common-sense reasoning tasks. Hybrid models are marked with *. The best results of simple and hybrid models are highlighted. In largest scale, we compare our simple models with even hybrid models and highlight the best results." + }, + { + "type": "table", + "bbox": [ + 0.158, + 0.156, + 0.877, + 0.761 + ], + "angle": 0, + "content": "
ModelWiki. ppl ↓LMB. ppl ↓LMB. acc ↑PIQA acc ↑Hella. acc_n ↑Wino. acc ↑ARC-e acc ↑ARC-c acc_n ↑SIQA acc ↑BoolQ acc ↑
340M params / 15B tokens
Transformer++31.5241.0830.7662.9834.7650.5345.2124.0536.8158.24
RetNet32.5049.7328.2462.6134.1550.9144.2723.6236.7959.72
GLA28.5143.0228.7364.0535.9650.0054.1924.2937.1358.39
Mamba30.8340.2129.9463.7935.8849.8249.2424.5635.4160.07
DeltaNet28.6547.3028.4363.5235.9549.6352.6825.3737.9658.79
TTT27.4434.1930.0663.9735.7150.0853.0126.1137.3259.83
Gated DeltaNet27.0130.9434.1163.0838.1251.6055.2826.7734.8959.54
MONETA (ours)26.1929.3135.7063.9939.2352.0455.9627.1537.2960.22
YAAD (ours)26.6129.1134.0964.9339.8651.1254.7528.6433.8260.29
MEMORA (ours)27.1630.4433.6865.2139.1751.2353.4027.9934.159.29
760M params / 30B tokens
Transformer++25.2127.6435.7866.9242.1951.9560.3832.4639.5160.37
RetNet26.0824.4534.5167.1941.6352.0963.1732.7838.3657.92
Mamba222.9428.3733.5467.9042.7149.7763.4831.0940.0658.15
DeltaNet24.3724.6037.0666.9341.9850.6564.8731.3939.8859.02
TTT24.1723.5134.7467.2543.9250.9964.5333.8140.1659.58
Gated DeltaNet21.1822.0935.5468.0144.9550.7366.8733.0939.2159.14
Samba*20.6322.7139.7269.1947.3552.0166.9233.2038.9861.24
Gated DeltaNet-H2*19.8820.8339.1868.9548.2252.5767.0135.4939.3961.11
MONETA (ours)21.1821.9438.0269.5549.1653.0167.4736.0940.5363.18
YAAD (ours)20.9921.5737.8569.1450.0253.9367.7836.2741.0163.34
MEMORA (ours)22.2822.3138.1967.8249.3053.2863.5736.1540.9462.96
MONETA-H (ours)18.7220.1340.5970.8450.1354.1767.6436.7940.8762.43
YAAD-H (ours)18.5919.8040.2269.5150.4853.6968.0436.5540.2861.94
MEMORA-H (ours)18.2420.5539.9169.0649.8452.8866.9036.1240.9961.75
1.3B params / 100B tokens
Transformer++18.5318.3242.6070.0250.2353.5168.8335.1040.6657.09
RetNet19.0817.2740.5270.0749.1654.1467.3433.7840.7860.39
Mamba216.5612.5645.6671.8755.6755.2472.4737.8840.2060.13
DeltaNet17.7116.8842.4670.7250.9353.3568.4735.6640.2255.29
Gated DeltaNet16.4212.1746.6572.2555.7657.4571.2138.3940.6360.24
Samba*16.1313.2944.9470.9453.4255.5668.8136.1739.9662.11
Gated DeltaNet-H2*15.9112.5548.7672.1956.8857.7771.3339.0741.9161.55
MONETA (ours)15.5211.4747.8873.1656.1459.0972.5340.3241.9161.18
YAAD (ours)15.1811.8947.2372.8156.4659.0272.1440.0540.7361.86
MEMORA (ours)15.9012.0448.6773.1055.9957.3671.5537.9240.1961.34
" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.785, + 0.295, + 0.803 + ], + "angle": 0, + "content": "6.4 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.81, + 0.92, + 0.84 + ], + "angle": 0, + "content": "In this section we perform ablation studies to validate if different design choices that we discussed through the paper are positively contributing for achieving better results." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.853, + 0.922, + 0.9 + ], + "angle": 0, + "content": "The Effect of \\( p \\) on Performance. We first evaluate the effect of \\( p \\) on the performance of MONETA. We vary the value of \\( p \\in \\{1, 1.5, 2, 2.8, 3, 3.2, 4\\} \\) and context window from 2K to 16K. The results are reported in Figure 4. Interestingly, there is no monotone pattern when increasing the value of \\( p \\) and the best performance is achieved when \\( p = 3 \\), while \\( p = 4 \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.091, + 0.92, + 0.122 + ], + "angle": 0, + "content": "Table 3: Performance of MONETA, YAAD, MEMORA, and baselines on NIAH task from RULER benchmark. The best results with highest accuracy are highlighted." + }, + { + "type": "table", + "bbox": [ + 0.258, + 0.132, + 0.778, + 0.269 + ], + "angle": 0, + "content": "
ModelS-NIAH-PKS-NIAH-NS-NIAH-WAverage
2K4K8K2K4K8K1K2K4K
Mamba298.661.431.098.455.814.262.242.24.252.0
DeltaNet96.898.898.647.215.412.885.246.220.057.9
Gated DeltaNet89.891.490.099.291.826.486.482.624.475.8
TTT98.498.898.060.236.610.285.878.828.066.1
MONETA99.498.898.899.499.492.892.288.270.893.5
YaAD99.298.694.499.898.693.291.889.667.492.9
MEMORA99.298.892.698.499.293.292.488.270.492.1
" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.295, + 0.92, + 0.327 + ], + "angle": 0, + "content": "achieves the worst performance. Also, although different values of \\( p \\) results in different memory modules with varied performance, the scaling pattern when increasing the context length is almost the same." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.339, + 0.92, + 0.4 + ], + "angle": 0, + "content": "The Effect of \\( q \\) on Performance. Similarly, we evaluate the effect of \\( q \\) by varying it in \\( \\{2, 3, 4, 5\\} \\). Interestingly, contrary to \\( p \\), the value of \\( q \\) can change the scaling pattern when increasing the context length. The main reason for this observation is that the value of \\( q \\) determines the retention gate and a powerful retention gate can improve the memory management, resulting in better performance." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.413, + 0.92, + 0.49 + ], + "angle": 0, + "content": "The Effect of Design. To evaluate the architectural design choices, we perform an ablation study on YAAD. The results are in Table 4. The first row, reports the performance of YAAD, while (1) the second row removes the retention (i.e., \\(\\beta = 1\\)), (2) third row makes \\(\\delta\\) input independent, (3) the third row removes \\(\\ell_2\\)-loss from the Huber loss, (4) the forth row removes the \\(\\ell_1\\) condition, and (5) the last row replaces the MLP with a linear layer. These results indicate that all design choices are contributing to the performance of the model." 
+ }, + { + "type": "image", + "bbox": [ + 0.118, + 0.519, + 0.35, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.517, + 0.597, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.643, + 0.6, + 0.674 + ], + "angle": 0, + "content": "Figure 4: The effect of parameters \\( p \\) and \\( q \\) on the performance with different context length." + }, + { + "type": "table_caption", + "bbox": [ + 0.657, + 0.525, + 0.92, + 0.555 + ], + "angle": 0, + "content": "Table 4: Ablation study on the components of YAAD." + }, + { + "type": "table", + "bbox": [ + 0.675, + 0.566, + 0.905, + 0.687 + ], + "angle": 0, + "content": "
ModelAvg. LM
YAAD53.98
- Retention Gate50.63
- Input-dependent δ52.19
l2-loss52.86
l1-loss53.04
linear memory51.57
" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.705, + 0.276, + 0.723 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.737, + 0.92, + 0.843 + ], + "angle": 0, + "content": "In this paper, we present MIRAS, a general framework that explains the connection of online optimization and test time memorization. MIRAS framework can explain the role of several standard architectural choices in the literature (e.g., forget gate) and helps design next generation of architectures that are capable of managing the memory better. Building upon our framework, we present three novel sequence models, each of which with its own (dis)advantages. Our experimental evaluations show that all these variants are more powerful than Transformers and linear RNNs, in various downstream tasks. In this work, we present a diverse set of variants using MIRAS. In future, exploring these alternative architectures for different downstream tasks is an interesting future direction." + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.09, + 0.236, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.116, + 0.922, + 0.161 + ], + "angle": 0, + "content": "[1] Ali Behrouz, Parsa Delavari, and Farnoosh Hashemi. \"Unsupervised Representation Learning of Brain Activity via Bridging Voxel Activity and Functional Connectivity\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=n0jZfpLyh1." + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.161, + 0.921, + 0.19 + ], + "angle": 0, + "content": "[2] Ali Behrouz, Michele Santacatterina, and Ramin Zabih. \"Mambamixer: Efficient selective state space models with dual token and channel selection\". In: arXiv preprint arXiv:2403.19888 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.191, + 0.921, + 0.221 + ], + "angle": 0, + "content": "[3] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. \"Titans: Learning to memorize at test time\". In: arXiv preprint arXiv:2501.00663 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.221, + 0.919, + 0.252 + ], + "angle": 0, + "content": "[4] Alberto Bietti, Vivien Cabannes, Diane Bouchacourt, Herve Jegou, and Leon Bottou. \"Birth of a transformer: A memory viewpoint\". In: Advances in Neural Information Processing Systems 36 (2023), pp. 1560-1588." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.252, + 0.918, + 0.282 + ], + "angle": 0, + "content": "[5] Yonatan Bisk, Rowan Zellers, Jianfeng Gao, Yejin Choi, et al. \"Piqa: Reasoning about physical commonsense in natural language\". In: Proceedings of the AAAI conference on artificial intelligence. Vol. 34. 2020, pp. 7432-7439." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.282, + 0.891, + 0.298 + ], + "angle": 0, + "content": "[6] Leon Bottou and Vladimir Vapnik. \"Local learning algorithms\". In: Neural computation 4.6 (1992), pp. 888-900." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.298, + 0.921, + 0.387 + ], + "angle": 0, + "content": "[7] Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. \"BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions\". In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Ed. by Jill Burstein, Christy Doran, and Thamar Solorio. Minneapolis, Minnesota: Association for Computational Linguistics, June 2019, pp. 2924-2936. DOI: 10.18653/v1/N19-1300. URL: https://aclanthology.org/N19-1300/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.387, + 0.921, + 0.433 + ], + "angle": 0, + "content": "[8] Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. \"Think you have solved question answering? try arc, the ai2 reasoning challenge\". In: arXiv preprint arXiv:1803.05457 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.433, + 0.921, + 0.463 + ], + "angle": 0, + "content": "[9] Imre Csiszar. \"On information-type measure of difference of probability distributions and indirect observations\". In: Studia Sci. Math. Hungar. 2 (1967), pp. 299-318." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.463, + 0.921, + 0.509 + ], + "angle": 0, + "content": "[10] Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. \"Recurrent Neural Networks Learn to Store and Generate Sequences using Non-Linear Representations\". In: Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP. 2024, pp. 248-262." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.509, + 0.921, + 0.553 + ], + "angle": 0, + "content": "[11] Karan Dalal, Daniel Koceja, Gashon Hussein, Jiarui Xu, Yue Zhao, Youjin Song, Shihao Han, Ka Chun Cheung, Jan Kautz, Carlos Guestrin, et al. \"One-Minute Video Generation with Test-Time Training\". In: arXiv preprint arXiv:2504.05298 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.554, + 0.921, + 0.584 + ], + "angle": 0, + "content": "[12] Tri Dao and Albert Gu. \"Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality\". In: arXiv preprint arXiv:2405.21060 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.584, + 0.921, + 0.629 + ], + "angle": 0, + "content": "[13] Soham De, Samuel L Smith, Anushan Fernando, Aleksandar Botev, George Cristian-Muraru, Albert Gu, Ruba Haroun, Leonard Berrada, Yutian Chen, Srivatsan Srinivasan, et al. 
\"Griffin: Mixing gated linear recurrences with local attention for efficient language models\". In: arXiv preprint arXiv:2402.19427 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.629, + 0.921, + 0.675 + ], + "angle": 0, + "content": "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. \"An image is worth 16x16 words: Transformers for image recognition at scale\". In: arXiv preprint arXiv:2010.11929 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.675, + 0.921, + 0.704 + ], + "angle": 0, + "content": "[15] Yossi Gandelsman, Yu Sun, Xinlei Chen, and Alexei Efros. \"Test-time training with masked autoencoders\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 29374-29385." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.705, + 0.921, + 0.735 + ], + "angle": 0, + "content": "[16] Xavier Gonzalez, Andrew Warrington, Jimmy Smith, and Scott Linderman. \"Towards scalable and stable parallelization of nonlinear rnns\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 5817-5849." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.735, + 0.921, + 0.765 + ], + "angle": 0, + "content": "[17] Riccardo Grazzi, Julien Siems, Jörg KH Franke, Arber Zela, Frank Hutter, and Massimiliano Pontil. \"Unlocking state-tracking in linear rnns through negative eigenvalues\". In: arXiv preprint arXiv:2411.12537 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.765, + 0.921, + 0.795 + ], + "angle": 0, + "content": "[18] Klaus Greff, Rupesh K Srivastava, Jan Koutnk, Bas R Steunebrink, and Jürgen Schmidhuber. \"LSTM: A search space odyssey\". In: IEEE transactions on neural networks and learning systems 28.10 (2016), pp. 2222-2232." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.795, + 0.921, + 0.825 + ], + "angle": 0, + "content": "[19] Albert Gu and Tri Dao. \"Mamba: Linear-Time Sequence Modeling with Selective State Spaces\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=tEYskw1VY2." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.825, + 0.921, + 0.87 + ], + "angle": 0, + "content": "[20] Albert Gu, Karan Goel, and Christopher Re. \"Efficiently Modeling Long Sequences with Structured State Spaces\". In: International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=uYLFOz1v1AC." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.116, + 0.922, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.092, + 0.922, + 0.137 + ], + "angle": 0, + "content": "[21] Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. \"Liquid Structural State-Space Models\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=g4OTKRKfS7R." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.138, + 0.809, + 0.153 + ], + "angle": 0, + "content": "[22]Trevor Hastie, Robert Tibshirani, Jerome Friedman, et al. The elements of statistical learning. 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.155, + 0.92, + 0.183 + ], + "angle": 0, + "content": "[23] Elad Hazan et al. \"Introduction to online convex optimization\". In: Foundations and Trends® in Optimization 2.3-4 (2016), pp. 157-325." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.184, + 0.848, + 0.199 + ], + "angle": 0, + "content": "[24] Donald Olding Hebb. The organization of behavior: A neuropsychological theory. Psychology press, 2005." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.2, + 0.917, + 0.214 + ], + "angle": 0, + "content": "[25] Dan Hendrycks and Kevin Gimpel. \"Gaussian error linear units (gelus)\". In: arXiv preprint arXiv:1606.08415 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.215, + 0.921, + 0.243 + ], + "angle": 0, + "content": "[26] Donald E Hilt and Donald W Seegrist. Ridge, a computer program for calculating ridge regression estimates. Vol. 236. Department of Agriculture, Forest Service, Northeastern Forest Experiment ..., 1977." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.244, + 0.919, + 0.274 + ], + "angle": 0, + "content": "[27] Arthur E Hoerl and Robert W Kennard. \"Ridge regression: applications to nonorthogonal problems\". In: Technometrics 12.1 (1970), pp. 69-82." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.275, + 0.921, + 0.304 + ], + "angle": 0, + "content": "[28] John J Hopfield. “Neural networks and physical systems with emergent collective computational abilities.” In: Proceedings of the national academy of sciences 79.8 (1982), pp. 2554-2558." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.305, + 0.921, + 0.349 + ], + "angle": 0, + "content": "[29] Cheng-Ping Hsieh, Simeng Sun, Samuel Kriman, Shantanu Acharya, Dima Rekesh, Fei Jia, and Boris Ginsburg. \"RULER: What's the Real Context Size of Your Long-Context Language Models?\" In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=kIoBbc76Sy." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.35, + 0.921, + 0.38 + ], + "angle": 0, + "content": "[30] Jerry Yao-Chieh Hu, Dennis Wu, and Han Liu. \"Provably optimal memory capacity for modern hopfield models: Transformer-compatible dense associative memories as spherical codes\". In: arXiv preprint arXiv:2410.23126 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.381, + 0.921, + 0.41 + ], + "angle": 0, + "content": "[31] Peter J Huber. 
\"Robust estimation of a location parameter\". In: Breakthroughs in statistics: Methodology and distribution. Springer, 1992, pp. 492-518." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.411, + 0.919, + 0.44 + ], + "angle": 0, + "content": "[32] Kazuki Irie, Robert Csordas, and Jürgen Schmidhuber. \"Practical computational power of linear transformers and their recurrent and self-referential extensions\". In: arXiv preprint arXiv:2310.16076 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.441, + 0.921, + 0.47 + ], + "angle": 0, + "content": "[33] Kazuki Irie, Imanol Schlag, Robert Csordas, and Jurgen Schmidhuber. \"Going beyond linear transformers with recurrent fast weight programmers\". In: Advances in neural information processing systems 34 (2021), pp. 7703-7717." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.471, + 0.921, + 0.5 + ], + "angle": 0, + "content": "[34] Vidit Jain and Erik Learned-Miller. \"Online domain adaptation of a pre-trained cascade of classifiers\". In: CVPR 2011. IEEE. 2011, pp. 577-584." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.501, + 0.921, + 0.545 + ], + "angle": 0, + "content": "[35] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. \"Scaling laws for neural language models\". In: arXiv preprint arXiv:2001.08361 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.546, + 0.733, + 0.561 + ], + "angle": 0, + "content": "[36] M. Karami and V. Mirrokni. Lattice: Learning to Efficiently Compress the Memory. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.562, + 0.921, + 0.606 + ], + "angle": 0, + "content": "[37] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. \"Transformers are rnns: Fast autoregressive transformers with linear attention\". In: International conference on machine learning. PMLR. 2020, pp. 5156-5165." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.607, + 0.78, + 0.621 + ], + "angle": 0, + "content": "[38] Dmitry Krotov. \"Hierarchical associative memory\". In: arXiv preprint arXiv:2107.06446 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.622, + 0.921, + 0.652 + ], + "angle": 0, + "content": "[39] Dmitry Krotov and John J Hopfield. “Dense associative memory for pattern recognition”. In: Advances in neural information processing systems 29 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.653, + 0.921, + 0.696 + ], + "angle": 0, + "content": "[40] Aonian Li, Bangwei Gong, Bo Yang, Boji Shan, Chang Liu, Cheng Zhu, Chunhao Zhang, Congchao Guo, Da Chen, Dong Li, et al. \"Minimax-01: Scaling foundation models with lightning attention\". In: arXiv preprint arXiv:2501.08313 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.697, + 0.921, + 0.727 + ], + "angle": 0, + "content": "[41] Chengxuan Li, Di Huang, Zeyu Lu, Yang Xiao, Qingqi Pei, and Lei Bai. “A survey on long video generation: Challenges, methods, and prospects”. In: arXiv preprint arXiv:2403.16407 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.728, + 0.921, + 0.757 + ], + "angle": 0, + "content": "[42] Xiaoyu Li, Yuanpeng Li, Yingyu Liang, Zhenmei Shi, and Zhao Song. \"On the expressive power of modern hopfield networks\". In: arXiv preprint arXiv:2412.05562 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.758, + 0.921, + 0.801 + ], + "angle": 0, + "content": "[43] Yi Heng Lim, Qi Zhu, Joshua Selfridge, and Muhammad Firmansyah Kasim. \"Parallelizing non-linear sequential models over the sequence length\". In: The Twelfth International Conference on Learning Representations. 2024. URL: https://openreview.net/forum?id=E34A1VLN0v." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.802, + 0.921, + 0.832 + ], + "angle": 0, + "content": "[44] Bo Liu, Rui Wang, Lemeng Wu, Yihao Feng, Peter Stone, and Qiang Liu. 
\"Longhorn: State space models are amortized online learners\". In: arXiv preprint arXiv:2407.14207 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.833, + 0.921, + 0.878 + ], + "angle": 0, + "content": "[45] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. \"Lost in the middle: How language models use long contexts\". In: Transactions of the Association for Computational Linguistics 12 (2024), pp. 157-173." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.879, + 0.827, + 0.894 + ], + "angle": 0, + "content": "[46] Elizabeth F Loftus. \"The reality of repressed memories.\" In: American psychologist 48.5 (1993), p. 518." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.092, + 0.922, + 0.894 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.525, + 0.949 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.092, + 0.922, + 0.122 + ], + "angle": 0, + "content": "[47] Carlo Lucibello and Marc Mézard. \"Exponential capacity of dense associative memories\". In: Physical Review Letters 132.7 (2024), p. 077301." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.123, + 0.922, + 0.153 + ], + "angle": 0, + "content": "[48] Julien Mairal. \"Incremental majorization-minimization optimization with application to large-scale machine learning\". In: SIAM Journal on Optimization 25.2 (2015), pp. 829-855." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.154, + 0.922, + 0.197 + ], + "angle": 0, + "content": "[49] Harsh Mehta, Ankit Gupta, Ashok Cutkosky, and Behnam Neyshabur. \"Long Range Language Modeling via Gated State Spaces\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=5MkYIYCbva." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.199, + 0.921, + 0.228 + ], + "angle": 0, + "content": "[50] Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. “Pointer Sentinel Mixture Models”. In: International Conference on Learning Representations. 2017. URL: https://openreview.net/forum?id=Byj72udxe." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.229, + 0.921, + 0.259 + ], + "angle": 0, + "content": "[51] William Merrill, Jackson Petty, and Ashish Sabharwal. \"The Illusion of State in State-Space Models\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=QZgo9JZpLq." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.26, + 0.921, + 0.304 + ], + "angle": 0, + "content": "[52] Ravi Teja Mullapudi, Steven Chen, Keyi Zhang, Deva Ramanan, and Kayvon Fatahalian. \"Online model distillation for efficient video inference\". In: Proceedings of the IEEE/CVF International conference on computer vision. 2019, pp. 3573-3582." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.305, + 0.921, + 0.334 + ], + "angle": 0, + "content": "[53] Tsendsuren Munkhdalai, Alessandro Sordoni, Tong Wang, and Adam Trischler. “Metalearned neural memory”. In: Advances in Neural Information Processing Systems 32 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.335, + 0.921, + 0.365 + ], + "angle": 0, + "content": "[54] Tsendsuren Munkhdalai and Hong Yu. \"Neural semantic encoders\". In: Proceedings of the conference. Association for Computational Linguistics. Meeting. Vol. 1. NIH Public Access. 2017, p. 397." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.365, + 0.921, + 0.395 + ], + "angle": 0, + "content": "[55] Daniel Neil, Jun Haeng Lee, Tobi Delbruck, and Shih-Chii Liu. \"Delta networks for optimized recurrent network computation\". In: International conference on machine learning. PMLR. 2017, pp. 2584-2593." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.396, + 0.921, + 0.425 + ], + "angle": 0, + "content": "[56] Hideyuki Okano, Tomoo Hirano, and Evan Balaban. \"Learning and memory\". In: Proceedings of the National Academy of Sciences 97.23 (2000), pp. 12403-12404." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.426, + 0.921, + 0.47 + ], + "angle": 0, + "content": "[57] Antonio Orvieto, Samuel L Smith, Albert Gu, Anushan Fernando, Caglar Gulcehre, Razvan Pascanu, and Soham De. \"Resurrecting recurrent neural networks for long sequences\". In: International Conference on Machine Learning. PMLR. 2023, pp. 26670-26698." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.471, + 0.921, + 0.545 + ], + "angle": 0, + "content": "[58] Denis Paperno, German Kruszewski, Angeliki Lazaridou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernandez. \"The LAMBADA dataset: Word prediction requiring a broad discourse context\". In: Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Ed. by Katrin Erk and Noah A. Smith. Berlin, Germany: Association for Computational Linguistics, Aug. 2016, pp. 1525-1534. DOI: 10.18653/v1/P16-1144. URL: https://aclanthology.org/P16-1144/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.546, + 0.921, + 0.605 + ], + "angle": 0, + "content": "[59] Guilherme Penedo, Hynek Kydlcek, Loubna Ben allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro Von Werra, and Thomas Wolf. \"The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale\". In: The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2024. URL: https://openreview.net/forum?id=n6Sckn2QaG." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.607, + 0.922, + 0.711 + ], + "angle": 0, + "content": "[60] Bo Peng, Eric Alcaide, Quentin Gregory Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Nguyen Chung, Leon Derczynski, Xingjian Du, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, Jiaju Lin, Krishna Sri Ipsit Mantri, Ferdinand Mom, Atsushi Saito, Guangyu Song, Xiangru Tang, Johan S. Wind, Stanisław Wozniak, Zhenyuan Zhang, Qinghua Zhou, Jian Zhu, and Rui-Jie Zhu. \"RWKV: Reinventing RNNs for the Transformer Era\". In: The 2023 Conference on Empirical Methods in Natural Language Processing. 2023. URL: https://openreview.net/forum?id=7SaXczaBpG." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.712, + 0.922, + 0.757 + ], + "angle": 0, + "content": "[61] Bo Peng, Daniel Goldstein, Quentin Anthony, Alon Albalak, Eric Alcaide, Stella Biderman, Eugene Cheah, Xingjian Du, Teddy Ferdinan, Haowen Hou, et al. \"Eagle and finch: Rwkv with matrix-valued states and dynamic recurrence\". In: arXiv preprint arXiv:2404.05892 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.758, + 0.921, + 0.802 + ], + "angle": 0, + "content": "[62] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Saiteja Utpala, et al. \"RWKV-7\" Goose\" with Expressive Dynamic State Evolution\". In: arXiv preprint arXiv:2503.14456 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.803, + 0.921, + 0.847 + ], + "angle": 0, + "content": "[63] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Siateja Utpala, et al. \"Rwkv-7\" goose\" with expressive dynamic state evolution\". In: arXiv preprint arXiv:2503.14456 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.848, + 0.905, + 0.863 + ], + "angle": 0, + "content": "[64] Yury Polyanskiy and Yihong Wu. Information theory: From coding to learning. Cambridge university press, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.864, + 0.914, + 0.879 + ], + "angle": 0, + "content": "[65] DL Prados and SC Kak. \"Neural network capacity using delta rule\". In: *Electronics Letters* 25.3 (1989), pp. 197-199." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.092, + 0.922, + 0.879 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.508, + 0.938, + 0.525, + 0.949 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.092, + 0.922, + 0.137 + ], + "angle": 0, + "content": "[66] Zhen Qin, Songlin Yang, Weixuan Sun, Xuyang Shen, Dong Li, Weigao Sun, and Yiran Zhong. \"HGRN2: Gated Linear RNNs with State Expansion\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=y6SqBJfCSk." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.138, + 0.922, + 0.183 + ], + "angle": 0, + "content": "[67] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. \"Exploring the limits of transfer learning with a unified text-to-text transformer\". In: Journal of machine learning research 21.140 (2020), pp. 1-67." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.184, + 0.922, + 0.242 + ], + "angle": 0, + "content": "[68] Hubert Ramsauer, Bernhard Schäfl, Johannes Lehner, Philipp Seidl, Michael Widrich, Lukas Gruber, Markus Holzleitner, Thomas Adler, David Kreil, Michael K Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. \"Hopfield Networks is All You Need\". In: International Conference on Learning Representations. 2021. URL: https://openreview.net/forum?id=tL89RnzIiCd." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.243, + 0.922, + 0.274 + ], + "angle": 0, + "content": "[69] Meisam Razaviyayn, Mingyi Hong, and Zhi-Quan Luo. “A unified convergence analysis of block successive minimization methods for nonsmooth optimization”. In: SIAM Journal on Optimization 23.2 (2013), pp. 1126–1153." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.275, + 0.921, + 0.304 + ], + "angle": 0, + "content": "[70] Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. \"Samba: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling\". In: arXiv preprint arXiv:2406.07522 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.305, + 0.795, + 0.319 + ], + "angle": 0, + "content": "[71] Lee T Robertson. \"Memory and the brain\". In: Journal of dental education 66.1 (2002), pp. 30-42." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.32, + 0.919, + 0.349 + ], + "angle": 0, + "content": "[72] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. \"Winogrande: An adversarial winograd schema challenge at scale\". In: Communications of the ACM 64.9 (2021), pp. 99-106." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.35, + 0.921, + 0.38 + ], + "angle": 0, + "content": "[73] Imanol Schlag, Kazuki Irie, and Jürgen Schmidhuber. \"Linear transformers are secretly fast weight programmers\". In: International Conference on Machine Learning. PMLR. 2021, pp. 9355-9366." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.38, + 0.92, + 0.41 + ], + "angle": 0, + "content": "[74] JH Schmidhuber. \"Learning to control fast-weight memories: An alternative to recurrent nets. Accepted for publication in\". In: Neural Computation (1992)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.41, + 0.92, + 0.455 + ], + "angle": 0, + "content": "[75] Jürgen Schmidhuber. 
“Reducing the ratio between learning complexity and number of time varying variables in fully recurrent nets”. In: ICANN'93: Proceedings of the International Conference on Artificial Neural Networks Amsterdam, The Netherlands 13–16 September 1993 3. Springer. 1993, pp. 460–463." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.455, + 0.908, + 0.47 + ], + "angle": 0, + "content": "[76] Jürgen Schmidhuber and Sepp Hochreiter. \"Long Short-term Memory\". In: Neural Computation MIT-Press (1997)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.471, + 0.919, + 0.5 + ], + "angle": 0, + "content": "[77] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. \"Implicit Language Models are RNNs: Balancing Parallelization and Expressivity\". In: arXiv preprint arXiv:2502.07827 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.501, + 0.92, + 0.531 + ], + "angle": 0, + "content": "[78] Shai Shalev-Shwartz et al. \"Online learning and online convex optimization\". In: Foundations and Trends® in Machine Learning 4.2 (2012), pp. 107-194." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.531, + 0.921, + 0.576 + ], + "angle": 0, + "content": "[79] Julien Siems, Timur Carstensen, Arber Zela, Frank Hutter, Massimiliano Pontil, and Riccardo Grazzi. \"DeltaProduct: Increasing the Expressivity of DeltaNet Through Products of Householders\". In: arXiv preprint arXiv:2502.10297 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.577, + 0.921, + 0.62 + ], + "angle": 0, + "content": "[80] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. \"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=Ai8Hw3AXqks." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.621, + 0.921, + 0.665 + ], + "angle": 0, + "content": "[81] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. 
\"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=Ai8Hw3AXqks." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.667, + 0.92, + 0.697 + ], + "angle": 0, + "content": "[82] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. \"Rofomer: Enhanced transformer with rotary position embedding\". In: Neurocomputing 568 (2024), p. 127063." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.697, + 0.921, + 0.742 + ], + "angle": 0, + "content": "[83] Yu Sun, Xinhao Li, Karan Dalal, Jiarui Xu, Arjun Vikram, Genghan Zhang, Yann Dubois, Xinlei Chen, Xiaolong Wang, Sanmi Koyejo, et al. \"Learning to (learn at test time): Rnns with expressive hidden states\". In: arXiv preprint arXiv:2407.04620 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.742, + 0.919, + 0.772 + ], + "angle": 0, + "content": "[84] Yutao Sun, Li Dong, Shaohan Huang, Shuming Ma, Yuqing Xia, Jilong Xue, Jianyong Wang, and Furu Wei. \"Retentive network: A successor to transformer for large language models\". In: arXiv preprint arXiv:2307.08621 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.772, + 0.801, + 0.787 + ], + "angle": 0, + "content": "[85] W Scott Terry. Learning and memory: Basic principles, processes, and procedures. Routledge, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.788, + 0.921, + 0.817 + ], + "angle": 0, + "content": "[86] Robert Tibshirani. \"Regression shrinkage and selection via the lasso\". In: Journal of the Royal Statistical Society Series B: Statistical Methodology 58.1 (1996), pp. 267-288." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.818, + 0.921, + 0.862 + ], + "angle": 0, + "content": "[87] Matteo Tiezzi, Michele Casoni, Alessandro Betti, Tommaso Guidi, Marco Gori, and Stefano Melacci. 
\"On the resurgence of recurrent models for long sequences: Survey and research opportunities in the transformer era\". In: arXiv preprint arXiv:2402.08132 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.863, + 0.921, + 0.908 + ], + "angle": 0, + "content": "[88] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. \"Llama: Open and efficient foundation language models\". In: arXiv preprint arXiv:2302.13971 (2023)." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.092, + 0.922, + 0.908 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.508, + 0.938, + 0.526, + 0.949 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.092, + 0.922, + 0.152 + ], + "angle": 0, + "content": "[89] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.153, + 0.922, + 0.228 + ], + "angle": 0, + "content": "[90] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Ed. by I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.229, + 0.921, + 0.273 + ], + "angle": 0, + "content": "[91] Johannes Von Oswald, Maximilian Schlegel, Alexander Meulemans, Seijin Kobayashi, Eyvind Niklasson, Nicolas Zucchet, Nino Scherrer, Nolan Miller, Mark Sandler, Max Vlademyrov, et al. \"Uncovering mesa-optimization algorithms in transformers\". In: arXiv preprint arXiv:2309.05858 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.274, + 0.921, + 0.304 + ], + "angle": 0, + "content": "[92] Ke Alexander Wang, Jiaxin Shi, and Emily B Fox. \"Test-time regression: a unifying framework for designing sequence models with associative memory\". In: arXiv preprint arXiv:2501.12352 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.305, + 0.921, + 0.334 + ], + "angle": 0, + "content": "[93] Yingheng Wang, Zichen Wang, Gil Sadeh, Luca Zancato, Alessandro Achille, George Karypis, and Huzefa Rangwala. \"Long-context Protein Language Model\". In: bioRxiv (2024), pp. 2024-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.334, + 0.921, + 0.365 + ], + "angle": 0, + "content": "[94] Songlin Yang, Jan Kautz, and Ali Hatamizadeh. “Gated Delta Networks: Improving Mamba2 with Delta Rule”. In: arXiv preprint arXiv:2412.06464 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.365, + 0.921, + 0.409 + ], + "angle": 0, + "content": "[95] Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. “Gated Linear Attention Transformers with Hardware-Efficient Training”. In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=ia5XvxFUJT." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.41, + 0.921, + 0.44 + ], + "angle": 0, + "content": "[96] Songlin Yang, Bailin Wang, Yu Zhang, Yikang Shen, and Yoon Kim. \"Parallelizing linear transformers with the delta rule over sequence length\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 115491-115522." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.44, + 0.921, + 0.5 + ], + "angle": 0, + "content": "[97] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. \"HellaSwag: Can a Machine Really Finish Your Sentence?\" In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Ed. by Anna Korhonen, David Traum, and Lluis Marquez. Florence, Italy: Association for Computational Linguistics, July 2019, pp. 4791-4800. DOI: 10.18653/v1/P19-1472. URL: https://aclanthology.org/P19-1472/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.501, + 0.921, + 0.53 + ], + "angle": 0, + "content": "[98] Biao Zhang and Rico Sennrich. \"Root mean square layer normalization\". In: Advances in Neural Information Processing Systems 32 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.53, + 0.921, + 0.577 + ], + "angle": 0, + "content": "[99] Hao Zhang, Alexander C Berg, Michael Maire, and Jitendra Malik. \"SVM-KNN: Discriminative nearest neighbor classification for visual category recognition\". In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06). Vol. 2. IEEE. 2006, pp. 2126-2136." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.092, + 0.922, + 0.577 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.525, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.427, + 0.109 + ], + "angle": 0, + "content": "A Additional Related Work" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.127, + 0.923, + 0.295 + ], + "angle": 0, + "content": "Modern Linear RNNs. Recent efforts aim to overcome Transformers quadratic cost and limitations in long-context modeling by designing efficient recurrent alternatives (Tiezzi et al. 2024), mainly due to fast inference and training of such models. The first generation of models—such as RetNet (Sun et al. 
2023), LRU (Orvieto et al. 2023), RWKV (Peng et al. 2023), S5 (Smith et al. 2023), and S4 (Gu et al. 2022)—uses data-independent transition matrix mechanism with Hebbian-like update rule. The second generation of such models started to incorporate input-dependent parameters into such linear architectures (e.g., Griffin (De et al. 2024), SSMs (Behrouz et al. 2024b; Dao et al. 2024; Hasani et al. 2023), RWKV6 (Peng et al. 2024)), and/or use more expressive memory updating rule based on delta rule (Liu et al. 2024a; Peng et al. 2025b; Schlag et al. 2021; Yang et al. 2024a,c). The next generation of models, extend the memory architecture to deep models, while using delta-rule-like update rule (Sun et al. 2024), or momentum-based update rule (Behrouz et al. 2024c). Recently, to further enhance the performance of delta-rule-based sequence models, Siemens et al. (2025) suggest using multiple gradient descent update per token, resulting in more expressive sequence models in state tracking tasks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.301, + 0.923, + 0.364 + ], + "angle": 0, + "content": "In addition to the above fast linear recurrent sequence models, several studies have focused on (interpretable) non-linear RNNs (Csordás et al. 2024; Gonzalez et al. 2024; Karami et al. 2025; Lim et al. 2024; Merrill et al. 2024; Schone et al. 2025; Von Oswald et al. 2023), and how their training can be faster (Gonzalez et al. 2024; Lim et al. 2024; Schone et al. 2025). However, due to the recurrent nature of such models, parallelizing them in larger scales is still challenging." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.374, + 0.923, + 0.468 + ], + "angle": 0, + "content": "Fast Weight Programs. The idea of interpretation of linear layers as the key-value associative memory system backs to Hopfield networks (Hopfield 1982) and then fast weight programs, in which dynamic fast programs are incorporated into recurrent neural networks as writeable memory (Schlag et al. 
2021; Schmidhuber 1992; Schmidhuber 1993). The two learning rules of Hebbian (Hebb 2005) and delta rule (Prados et al. 1989) are the most popular learning rules for them, which have been extensively explored in the literature (Irie et al. 2021; Munkhdalai et al. 2019, 2017; Schlag et al. 2021; Schmidhuber 1992; Yang et al. 2024a,c)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.479, + 0.923, + 0.557 + ], + "angle": 0, + "content": "Test Time Training. The key ideas of learning at test time backs to early studies on local learning Bottou et al. 1992, in which each test data is trained on its neighbors before making a prediction (Gandelsman et al. 2022; Zhang et al. 2006). Later applying this idea on modern architectures, it has shown promising performance in diverse downstream tasks such as vision tasks (Jain et al. 2011; Mullapudi et al. 2019), video generation (Dalal et al. 2025), etc., mostly due to their ability to mitigate out-of-distribution samples." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.568, + 0.923, + 0.691 + ], + "angle": 0, + "content": "Hopfield Networks. We build MIRAS based on the concept of associative memory in its broad form, where we learn an underlying mapping between keys and values. One of the earliest studies that discuss building neural architectures based on associative memory is Hopfield Networks (Hopfield 1982), in which associative memory is defined as the minimizing the energy function required to store keys and values. While traditional Hopfield networks has limited applicability in recent years (mainly due to limited capacity of vector-valued memory and energy function), several recent studies aim to improve their capacity by various techniques (Krotov 2021; Krotov et al. 2016; Li et al. 2024b), including extending the energy function of such models based on exponential kernels (Krotov et al. 2016; Lucibello et al. 2024), and discuss their connection to Transformers (Hu et al. 2024; Ramsauer et al. 2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.702, + 0.924, + 0.917 + ], + "angle": 0, + "content": "Unifying Frameworks. In recent years, there have been growing efforts to understand the underlying mechanism of sequence models and unify (a subset of) them through a single perspective. Dao et al. (2024) present SSD framework to connect linear Transformers and (a subset of) linear recurrent models through the lens of associative operators and structured matrices. The SSD framework, however, is limited to models with vector or matrix-valued memory that are updated using a Hebbian-like update rules. Later, Liu et al. (2024a) present an online learning perspective on (a subset of) linear recurrent models. While this framework can also explain more expressive recurrent models based on delta rule, it is limited to online learners (i.e., models that optimize their internal associative memory using stochastic optimizers, such as stochastic gradient descent) with matrix-valued memory. Several modern sequence models, such as Transformers (Vaswani et al. 2017b) or Titans (Behrouz et al. 2024c) cannot be expressed in this framework. Sun et al. (2024) further provide a unifying perspective on how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models, mainly due to limiting the objective to be regression loss. Recently, in a concurrent work to ours, Wang et al. (2025) also force models to have the same attentional bias objective and show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.508, + 0.938, + 0.527, + 0.95 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.093, + 0.922, + 0.168 + ], + "angle": 0, + "content": "However, this simplification, fully changes the understanding of underlying update rules in these models. For example, contrary to Wang et al. (2025), MIRAS can distinguish models with Hebbian-like update (with dot product similarity) and delta rule update (with regression loss). Furthermore, all presented sequence models in this work (e.g., MONETA, MEMORA, YAAD) as well as models like HGRN2 (Qin et al. 2024) are placed outside of this class of models, due to their different attentional bias." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.191, + 0.411, + 0.211 + ], + "angle": 0, + "content": "B Proof of Proposition 3.2" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.222, + 0.856, + 0.237 + ], + "angle": 0, + "content": "Here we present the proof of Proposition 3.2. For the sake of completeness, let us first re-state this Proposition." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.243, + 0.921, + 0.307 + ], + "angle": 0, + "content": "Proposition 3.2. Let \\(\\eta_t = \\eta\\) and define \\(h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)\\). Assume \\(\\mathcal{W} = \\mathbb{R}^d\\) and the function \\(h_t(W)\\) is strictly convex in \\(W\\) and let \\(\\mathcal{D}_h(\\cdot, \\cdot)\\) be the Bregman divergence defined by function \\(h(\\cdot)\\), i.e., \\(\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle\\). Set \\(\\mathrm{Ret}_t(W, W') = \\mathcal{D}_h(W, W')\\) and \\(\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)\\) in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.32, + 0.921, + 0.396 + ], + "angle": 0, + "content": "Proof. Let \\(\\{\\widehat{W}_1, \\widehat{W}_2, \\ldots\\}\\) be the sequence of parameters obtained by (FTRL Viewpoint) and \\(\\{\\widetilde{W}_1, \\widetilde{W}_2, \\ldots\\}\\) be the sequence of parameters obtained by (Learning-Retaining Viewpoint). To show both update rules are equivalent, it suffices to show that the above two sequences are the same if they are initialized at the same point. We prove this statement by induction. First of all, since both sequences are initialized at the same point, the induction base is satisfied (i.e. \\(\\widetilde{W}_1 = \\widehat{W}_1\\)). Now, assume by induction hypothesis that" + }, + { + "type": "equation", + "bbox": [ + 0.472, + 0.394, + 0.92, + 0.412 + ], + "angle": 0, + "content": "\\[\n\\widetilde {W} _ {t - 1} = \\widehat {W} _ {t - 1}. \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.416, + 0.918, + 0.445 + ], + "angle": 0, + "content": "To complete the induction, we need to show \\(\\widetilde{W}_t = \\widehat{W}_t\\). 
To this end, notice that, by (Learning-Retaining Viewpoint), we have" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.445, + 0.667, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widetilde {\\ell} _ {t} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\operatorname {R e t} _ {t} (W, \\widetilde {W} _ {t - 1})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.472, + 0.764, + 0.487 + ], + "angle": 0, + "content": "Using the choice of the Attentional Bias and the Retention function in the Proposition, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.495, + 0.92, + 0.579 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) - \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) \\tag {34} \\\\ - \\frac {1}{\\eta} R (\\widetilde {W} _ {t - 1}) - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widetilde {W} _ {t - 1}), W - \\widetilde {W} _ {t - 1} \\right\\rangle . 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.585, + 0.623, + 0.6 + ], + "angle": 0, + "content": "Ignoring the constant terms and using the induction hypothesis (33), we get" + }, + { + "type": "equation", + "bbox": [ + 0.294, + 0.608, + 0.92, + 0.692 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) \\tag {35} \\\\ - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widehat {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widehat {W} _ {t - 1}), W - \\widehat {W} _ {t - 1} \\right\\rangle . \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.699, + 0.757, + 0.716 + ], + "angle": 0, + "content": "On the other hand, recall that \\(\\{\\widehat{W}_1,\\widehat{W}_2,\\ldots \\}\\) is obtained by (FTRL Viewpoint). Therefore, we have" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.724, + 0.669, + 0.763 + ], + "angle": 0, + "content": "\\[\n\\widehat {W} _ {t - 1} = \\arg \\min _ {W} \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\mathcal {R} _ {t} (W).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.77, + 0.215, + 0.784 + ], + "angle": 0, + "content": "Thus, we have" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.784, + 0.92, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} \\left(W _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) + \\frac {1}{\\eta} \\nabla R \\left(W _ {t - 1}\\right) = 0. 
\\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.826, + 0.352, + 0.841 + ], + "angle": 0, + "content": "Combining (36) and (35), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.85, + 0.657, + 0.888 + ], + "angle": 0, + "content": "\\[\n\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.897, + 0.447, + 0.914 + ], + "angle": 0, + "content": "This implies \\(\\widetilde{W}_t = \\widehat{W}_t\\), which completes the proof." + }, + { + "type": "image", + "bbox": [ + 0.905, + 0.901, + 0.918, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.508, + 0.937, + 0.525, + 0.949 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.37, + 0.112 + ], + "angle": 0, + "content": "C Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.121, + 0.923, + 0.213 + ], + "angle": 0, + "content": "We perform experimental evaluation on the language modeling (Merit et al. 2017; Paperno et al. 2016), common-sense reasoning (Bisk et al. 2020; Clark et al. 2019; Clark et al. 2018; Sakaguchi et al. 2021; Zellers et al. 2019), and long context needle-in-haystack tasks (Hsieh et al. 2024). We compare our models with the state-of-the-art linear recurrent models, Transformers, and hybrid models (recurrent + attention). More specifically we compare with Transformer++ (Touvron et al. 2023), RetNet (Sun et al. 2023), Gated Linear Attention (GLA) (Yang et al. 2024b), Mamba (Gu et al. 2024), Mamba2 (Dao et al. 2024), DeltaNet (Yang et al. 2024c), TTT (Sun et al. 2024), and Gated DeltaNet (Yang et al. 2024a)." + }, + { + "type": "table_caption", + "bbox": [ + 0.413, + 0.225, + 0.618, + 0.239 + ], + "angle": 0, + "content": "Table 5: Architectural Details." 
+ }, + { + "type": "table", + "bbox": [ + 0.319, + 0.251, + 0.716, + 0.338 + ], + "angle": 0, + "content": "
ModelBlockDimHeadPeak LRToken
170M12768163e-315B
340M241024161.5e-315B
780M241536161.25e-330B
" + }, + { + "type": "page_number", + "bbox": [ + 0.509, + 0.938, + 0.525, + 0.949 + ], + "angle": 0, + "content": "26" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_origin.pdf b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e64a48b4a9af1819e987f70a8fbfe7094e293b1c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/55fad489-4439-46d5-b672-f9189f86f7ce_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcf6cc7978554305f03477b73fe5da9f15d6f9c6d4a50127811f1fb1e8b085a +size 1913860 diff --git a/data/2025/2504_13xxx/2504.13173/full.md b/data/2025/2504_13xxx/2504.13173/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a8b4ac527118fd67e58df3d96280db78c7987be0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/full.md @@ -0,0 +1,790 @@ +# It’s All Connected: A Journey Through Test-Time Memorization, Attentional Bias, Retention, and Online Optimization + +Ali Behrouz†, Meisam Razaviyayn†, Peilin Zhong†, and Vahab Mirrokni† + +Google Research + +{alibehrouz, Razaviyayn, peilinz, mirrokni}@google.com + +# Abstract + +Designing efficient and effective architectural backbones has been in the core of research efforts to enhance the capability of foundation models. Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we reconceptualize neural architectures, including Transformers, Titans, and modern linear recurrent neural networks as associative memory modules that learn a mapping of keys and values using an internal objective, referred to as attentional bias. Surprisingly, we observed that most existing sequence models leverage either (1) dot-product similarity, or (2) $\ell_2$ regression objectives as their attentional bias. 
Going beyond these objectives, we present a set of alternative attentional bias configurations along with their effective approximations to stabilize their training procedure. We then reinterpret forgetting mechanisms in modern deep learning architectures as a form of retention regularization, providing a novel set of forget gates for sequence models. Building upon these insights, we present MIRAS, a general framework to design deep learning architectures based on four choices of: (i) associative memory architecture, (ii) attentional bias objective, (iii) retention gate, and (iv) memory learning algorithm. We present three novel sequence models—MONETA, YAAD, and MEMORA—that go beyond the power of existing linear RNNs while maintaining a fast parallelizable training process. Our experiments show different design choices in MIRAS yield models with varying strengths. For example, certain instances of MIRAS achieve exceptional performance in special tasks such as language modeling, commonsense reasoning, and recall intensive tasks, even outperforming Transformers and other modern linear recurrent models. + +# 1 Introduction + +Designing efficient architectural backbones for sequence modeling is a key to enhance the capability of foundation models in domains ranging from language (Behrouz et al. 2024c; Vaswani et al. 2017a) and computer vision (Dosovitskiy et al. 2020) to computational biology (Wang et al. 2024) and neuroscience (Behrouz et al. 2024a). While Transformers (Vaswani et al. 2017a), mainly due to their in-context learning and ability to learn at scale (Kaplan et al. 2020), have been firmly established as state-of-the-art (SOTA) models in sequence modeling, their quadratic time and space complexity limits their applicability in tasks that require long context modeling (Dalal et al. 2025; Li et al. 2024a; Liu et al. 2024b). 
+ +Recent efforts aim to overcome Transformer limitations in long-context modeling by designing efficient recurrent alternatives (Behrouz et al. 2024c; Neil et al. 2017; Smith et al. 2022). Unlike Transformer's linearly growing memory (i.e., the KV cache), these models compress the context into a fixed size memory, demanding improved memory management for comparable performance. To design more effective architectures, studies focus on improving memory capacity and its management by using/designing more expressive: (1) Learning rules: from Hebbian rule (Hebb 2005) to Delta rule (Neil et al. 2017); (2) Forget gates: from LSTM's (Schmidhuber et al. 1997) to Mamba2's (Dao et al. 2024) and then Titan's forget gates (Behrouz et al. 2024c); and (3) More expressive memory architectures: from vector-valued memory in RetNet (Sun et al. 2023) and LRU (Orvieto et al. 2023) to neural deep memory in Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024). + +At the core of these advancements lies a critical question: "what is the underlying design framework behind these sequence models, and how can these models be enhanced?" Taking inspiration from the broad definitions of associative memory and learning in neuropsychology literature (Okano et al. 2000), several studies discuss the connection between Transformers + +and (linear) Recurrent Neural Networks (RNNs) with associative memory (Bietti et al. 2023; Hopfield 1982; Ramsauer et al. 2021). These studies, however, either: (1) lack a universal explanation to fully illustrate the underlying learning algorithms, (2) are limited to a specific definition of associative memory and lack generalizability, and/or (3) are unable to describe standard, widely used components such as forget gate. + +Contributions. 
Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we re-conceptualize neural architectures, including Transformers, Titans, and other modern linear recurrent neural networks based on a broad definition of associative memory with attentional bias. We define and formalize the concept of attentional bias as the internal memory objective of sequence models (see Section 3) that aims to learn the underlying mapping between inputs (i.e., keys and values). Our formulation reveals that almost all existing sequence models are associative memories that leverage the same type of attentional bias. We reinterpret existing forgetting mechanisms in modern deep learning architectures as a form of retention $\ell_2$ -regularization for the attentional bias, and then provide a novel set of alternative retention gates (forget gates) for sequence models, providing new insights on how to balance learning new concepts and the retention of previously learned concepts.

Building upon our formulation of memory and forget gate, we present MIRAS$^1$, a fundamental framework to design novel sequence modeling architectures through four choices of: (1) Attentional bias (i.e., memory objective), (2) Retention gate, (3) Memory architecture, and (4) Memory learning algorithm (i.e., optimizer). We motivate and discuss several novel design choices, leading to novel architectures beyond existing sequence modeling architectures.

Finally, we focus on three novel variants of MIRAS—MONETA, YAAD, and MEMORA—that are based on attentional biases beyond the simple $\ell_2$ -regression objective as well as novel retention gating mechanisms that are more robust than existing ones. 
The results illustrate the superior performance of these variants, outperforming state-of-the-art sequence models. + +Roadmap. In Section 2, we review literature and discuss relevant concepts that we use through the paper. In Section 3, we present and discuss the broad definition of associative memory with formally defining the concept of attentional bias. We then discuss two viewpoints—Learning-Retaining and Follow-the-Regularized-Leader (FTRL)—to interpret sequence modeling through the lens of optimization and prove the generality of Learning-Retaining over FTRL. In Section 4, we present our MIRAS framework and discuss how it unifies modern sequence models. In Section 5, to show the potential of MIRAS framework, we discuss a variety of novel design choices for (1) attentional bias, and (2) retention gate (forget gate). Later in Section 5.3, we present three novel sequence models as the variants of MIRAS, and then discuss how to train them in a parallelizable manner. Finally, our experimental evaluations are reported in Section 6. + +# 2 Preliminaries and Background + +In this section, we review the related studies and background concepts that we use through the paper. + +Attention. Attention as the backbone of Transformers is a critical component that acts as their associative memory (Bietti et al. 2023). 
Given input $x \in \mathbb{R}^{N \times d_{\mathrm{in}}}$ , causal attention computes output $y \in \mathbb{R}^{N \times d_{\mathrm{in}}}$ based on Softmax over input dependent key, value, and query matrices: + +$$ +\mathbf {Q} = x \mathbf {W} _ {\mathrm {Q}}, \quad \mathbf {K} = x \mathbf {W} _ {\mathrm {K}}, \quad \mathbf {V} = x \mathbf {W} _ {\mathrm {V}}, \tag {1} +$$ + +$$ +\mathbf {y} _ {i} = \sum_ {j = 1} ^ {i} \frac {\exp \left(\mathbf {q} _ {i} ^ {\top} \mathbf {k} _ {j} / \sqrt {d _ {\mathrm {i n}}}\right) \mathbf {v} _ {j}}{\sum_ {\ell = 1} ^ {i} \exp \left(\mathbf {q} _ {i} ^ {\top} \mathbf {k} _ {\ell} / \sqrt {d _ {\mathrm {i n}}}\right)}, \tag {2} +$$ + +where $\mathbf{W}_{\mathrm{Q}}, \mathbf{W}_{\mathrm{K}}$ , and $\mathbf{W}_{\mathrm{V}} \in \mathbb{R}^{d_{\mathrm{in}} \times d_{\mathrm{in}}}$ are learnable parameters. While Transformers achieve significant improvements compared to traditional Recurrent Neural Networks (RNNs)—such as LSTM (Schmidhuber et al. 1997), their complexity that requires at least $N \times d$ operators to calculate the output has been the main motivation for researchers to think about alternative architectures. We divide and review the research efforts to design alternative architectures into two groups: (1) Linear shallow memory recurrent models, (2) Deep memory modules. + +# Associative Memory + +# Memory Architecture + +The neural architecture that stores memories. + +1. Vector +2.Matrix +3. Multilayer Perceptron (MLP) +4. Memory Mosaics + +# Attentional Bias + +The memory internal objective. + +1. $\ell_p$ Regression Loss +2. Dot Product Similarity +3. Huber Loss +4. KL-Divergence + +# Retention Gate + +The gate to retain the past state of the memory. + +1. $\ell_p$ Regularization (Local or Global) +2. Elastic Net Regularization +3. KL Divergence +4. Bregman Divergence + +# Memory Algorithm + +The algorithm that learns the mapping. + +1. Gradient Descent (GD) +2. GD with Momentum +3. Newton's Method +4. 
Non-parametric Solutions ... + +![](images/6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg) +Associative Memory is a neural network that learns to map keys to values based on an Attentional Bias objective. +Figure 1: The overview of MIRAS framework. MIRAS is based on four critical choices of (1) memory architecture, (2) attentional bias, (3) retention gate, and (4) memory learning algorithm. In this framework, the memory architecture determines the model capacity to memorize; attentional bias is responsible for modeling the underlying mapping patterns; retention gate determines how to balance learning new concepts and the retention of previously learned concepts; and memory learning algorithm is responsible for memory management. + +(Linear) Recurrent Models. For many years, non-linear (gated) recurrent neural networks had been the de facto architectural backbones in deep learning (Greff et al. 2016). Their recurrent nature, however, results in non-parallelizable training, making their large scale training infeasible. To this end, in recent years, linear RNNs as alternatives to both Transformers and non-linear RNNs attract much attention mainly due to their parallelizable and linear-time training while maintaining competitive performance (Peng et al. 2025a; Sun et al. 2023; Yang et al. 2024c). Earlier variants of linear RNNs (De et al. 2024; Sun et al. 2023; Yang et al. 2024b), which mostly are based on Hebbian learning rule (Hebb 2005), aim to compress the data into their vector-valued (or matrix-valued) memory (De et al. 2024; Katharopoulos et al. 2020; Liu et al. 2024a; Sun et al. 2023; Yang et al. 2024b). 
Let $\mathcal{M}_t \in \mathbb{R}^{d \times n}$ be the memory ( $n = 1$ means vector-valued memory), and $\mathbf{k}, \mathbf{v} \in \mathbb{R}^d$ are keys and values (i.e., projection of input $x_t \in \mathbb{R}^d$ ), a simple general formulation for such linear RNNs can be written as: + +$$ +\mathcal {M} _ {t} = A _ {t} * \mathcal {M} _ {t - 1} + \mathbf {v} _ {t} \mathbf {k} _ {t} ^ {\top}, \tag {3} +$$ + +where $*$ is an arbitrary associative operator and $A_{t}$ is a data-(in)dependent diagonal matrix or a scalar (Yang et al. 2024c). Despite the efficiency that comes with the linear recurrent nature of these models, the memory can overflow mainly due to the additive (without replacement) nature of Hebbian learning rule, resulting in limited memory capacity and limited expressive power in in-context learning tasks. Moreover, the vector-valued memory of these architectures can limit their ability to learn/memorize large context window, mainly due to the limited expressive power of memory to learn the underlying patterns of data (Behrouz et al. 2024c; Sun et al. 2024). + +To address the above mentioned limitations, recurrent models that use a matrix-valued memory with Delta learning rule has gained popularity in recent years (Neil et al. 2017; Schlag et al. 2021; Yang et al. 2024c). Despite significant advantages, even these delta-rule-based recurrent models face theoretical limitations (Irie et al. 2023) with moderate performance in practice (Yang et al. 2024c). Recently, several studies aim to improve the performance of such models by adding scalar or channel-wise forget gate mechanisms (Peng et al. 2025b; Yang et al. 2024a), using negative eigenvalues (Grazzi et al. 2024), and multiple learning steps (Siems et al. 2025). They, however, still suffer from performance drop in long context, mainly due to the less expressive memory architectures (Behrouz et al. 2024c). + +Table 1: Overview of recent sequence models in MIRAS framework perspective. 
Surprisingly, all models are using the same type of attentional bias and regularization (forget gate). Note that these architectural choices does not uniquely identify the backbone as there are other design choices (e.g., input-dependency, channel-wise parameters, etc.) as well as the use of other components such as attention, convolutions, etc. Note that for attentional bias and retention gate, we are referring to the original design of MIRAS, discussed in Equation 4 and Remark 1. + +
ModelMemory ArchitectureAttentional BiasRetention Gate†Memory AlgorithmMemory Write Operation
Shallow Memory
RetNet (2023)VectorDot-ProductL2GDMt=αMt-1+vtktT
Transformer (2017)MatrixL2-NonparametricMt=Mt-1∪{kt, vt}
LA (2021)MatrixDot-Product-GDMt=Mt-1+vtktT
DFWMatrixDot-ProductL2GDMt=(βtαT) ⊙ Mt-1+vtktT
Lightening Attention (2025)MatrixDot-ProductL2GDMt=αMt-1+vtktT
GLA (2024)MatrixDot-ProductL2GDMt=Diag(αt)Mt-1+vtktT
Mamba (2024)MatrixDot-ProductL2GDMt=αMt-1+vtktT
HGRN2 (2024)MatrixL1L2GDMt=Diag(αt)Mt-1+vt(1-αt)T
DeltaNet (2017)MatrixL2-GDMt=(I-βtktkT)Mt-1+βtvtktT
Longhorn (2024)MatrixL2-Implicit GDMt=(I-βtktkT)Mt-1+(βt1+ktkβt)xtkT
TTT-Linear (2024)MatrixL2-GDMt=Mt-1-η∇L(Mt-1, xt)
Gated DeltaNet (2024)MatrixL2L2GDMt=(αt(I-βtktkT))Mt-1+βtvtktT
RWKV-7 (2025)MatrixL2L2GDMt=diag(αt)(I-βtktkT)Mt-1+βtvtktT
DeltaProduct (2025)MatrixL2L2MGD*Mt=(αtΠi=1n(I-βt,ikt,i)T)Mt-1+Σj=1nΠi=j(I-βt,ivtj,kj,i)
Deep Memory
TTT-MLP (2024)2-layer MLPL2-GDMt=Mt-1-η∇L(Mt-1;kt, vt)
Titans-LMM (2024)k-layer MLPL2L2GD + MomentumMt=αMt-1-St, where St=ηSt-1-θt∇L(Mt-1;kt, vt)
MONETA (ours)2-layer MLPLpLqGDAt=AtA1-ηt∇lp(Wt-1;kt, vt), Wt=At/||At||q-2
YAAD (ours)2-layer MLPHuberL2GDWt=atWt-1-(ηt∇ε2(Wt-1;kt, vt) if ||M(kt)-vt|≤δt, ηtδt∇ε1(Wt-1;kt, vt) Otherwise.
MEMORA (ours)2-layer MLPL2KLGDWt=Softmax(αt log(Wt-1)-ηt∇ε2(Wt-1;kt, vt))
+ +* is using multiple rounds of GD per token. +For the sake of clarity, we use L2 for all modified L2-like regularizations. However, in fact, only Titans and RWKV-7 are using L2 retention gate (see Section 4) + +Deep Memory Module: Titans and Test Time Training. To overcome the limited memory and to extend the effective context length of deep sequence models, more recent studies focus on a new generation of architectures with deep memory module (Behrouz et al. 2024c; Sun et al. 2024). These architectures are built on the meta-learning perspective, where the memory is an MLP architecture that is updated using gradient descent (with momentum) (Behrouz et al. 2024c; Sun et al. 2024). Sun et al. (2024) further provide a unifying perspective that how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models. Recently, in a concurrent work to ours, Wang et al. (2025) show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss. It, however, still remains unanswered that "What is the underlying design framework behind these sequence models that can accurately unify existing architectures?" Moreover, the role of forget gates and its alternative choices in modern sequence models is surprisingly less explored. + +# 3 Associative Memory, Attentional Bias, and Retention + +Associative memory, which is an inseparable component of learning in humans (Terry 2017), has been the inspiration for many artificial neural architectures in the literature (Behrouz et al. 2024c; Hopfield 1982; Neil et al. 2017). These studies, however, define instances of the concept of associative memory, limiting the architecture to a specific class of similarity metrics between entities (i.e., keys and values). 
That is, broadly speaking, associative memory is an operator that maps a set of keys $K$ to a set of values $V$ , and so to learn the underlying mapping patterns in data, it requires an objective that targets a type of memory and measures the quality of learned mappings: + +Definition 3.1 (Associative Memory and Attentional Bias). Given a set of keys $\mathcal{K} \subseteq \mathbb{R}^{d_k}$ and values $\mathcal{V} \subseteq \mathbb{R}^{d_o}$ , associative memory is an operator $\mathcal{M}: \mathcal{K} \to \mathcal{V}$ . Learning the mapping of associative memory is based on an objective $\mathcal{L}$ , called + +Attentional Bias, that determines the type of memory and its tendency to prioritize some events: + +$$ +\mathcal {M} ^ {*} = \arg \min _ {\mathcal {M}} \quad \mathcal {L} (\mathcal {M} (\mathcal {K}); \mathcal {V}). \tag {4} +$$ + +A few remarks are in order: + +Remark 1. When we parameterize the memory with parameter $W$ , we use $\mathcal{M}(W, \mathbf{k})$ . In this parametric setting, the optimization problem in (4) should be performed over the parameter $W$ . Furthermore, in the parametric setup, we might use an additional regularization $\mathcal{R}(W)$ to control the retaining of the past data. + +Remark 2. Learning the mapping between keys and values (Equation 4) is a meta-learning problem, in which the attentional bias is optimized in the inner-loop and all other parameters of the neural network (e.g., linear projections, convolutions, etc.) are optimized in the outer-loop. Therefore, the model learns how to store the data into its parameters at test time (Behrouz et al. 2024c; Sun et al. 2024). + +# 3.1 Learning to Memorize and to Retain Through the Lens of Optimization + +Definition 3.1 translates the design of a neural architecture based on the concept of associative memory to learning the underlying mapping between keys and values, by minimizing an objective $\mathcal{L}$ . 
To optimize Equation 4, one simple approach is to utilize the idea of gradient descent. Specifically, given a new pair of keys and values, we update the memory as: + +$$ +W _ {t} = W _ {t - 1} - \eta_ {t} \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right), \tag {5} +$$ + +where, for simplicity, we use the definition $\ell(W_{t-1}; \mathbf{k}_t, \mathbf{v}_t) \coloneqq \mathcal{L}(\mathcal{M}(W; \mathbf{k}_t), \mathbf{v}_t)$ . Behrouz et al. (2024c) re-interpret the formulation as a momentary surprise metric, where the model memorizes tokens that violates the expectation of the objective (i.e., being surprising to the memory). Although the choice of objective is an important step to fully interpret Equation 5 (which we discuss in detail in Section 5), there are different viewpoints to interpret this update rule in its general format, which later can help us to go beyond existing architectures: + +# 3.2 Viewpoint 1: Online Regression and Follow-The-Regularized-Leader + +Equation (5) can be viewed as one step of online gradient descent over the sequence of the loss functions + +$$ +\ell \left(W; \mathbf {k} _ {1}, \mathbf {v} _ {1}\right), \ell \left(W; \mathbf {k} _ {2}, \mathbf {v} _ {2}\right), \dots , \ell \left(W; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right), \dots . \tag {6} +$$ + +It is well known that the online gradient descent can be viewed as a special case of Follow-The-Regularized-Leader (FTRL) algorithm with a special choice of loss functions (Shalev-Shwartz et al. 2012, Chapter 2) and (Hazan et al. 2016). 
Specifically, assuming $W_0 = 0$ , the update rule in (5) is equivalent to

$$
W _ {t} = \arg \min _ {W} \quad \sum_ {i = 1} ^ {t} \left\langle W - W _ {i - 1}, \nabla \ell \left(W _ {i - 1}; \mathbf {k} _ {i}, \mathbf {v} _ {i}\right) \right\rangle + \frac {1}{2 \eta} \| W \| _ {2} ^ {2}, \tag {7}
$$

where the term $\langle W - W_{i-1}, \nabla \ell(W_{i-1}; \mathbf{k}_i, \mathbf{v}_i) \rangle$ is the local linear approximation of the original loss at time $i$ and the second term is a regularization term. While the first part $\sum_{i=1}^{t} \langle W - W_{i-1}, \nabla \ell(W_{i-1}; \mathbf{k}_i, \mathbf{v}_i) \rangle$ measures how well the memory can learn all the past tokens, the second term $\frac{1}{2\eta} \|W\|_2^2$ penalizes the memory update with respect to the size of the memory.

Equation (7) uses a linear approximation of the loss function and quadratic regularization. We can, however, in principle use other approximations of the loss function as well as other regularization functions, as used in the past in online optimization (Hazan et al. 2016; Shalev-Shwartz et al. 2012) or in general optimization (Mairal 2015; Razaviyayn et al. 2013). Such changes are the idea behind the development of other optimization algorithms such as mirror descent. More specifically, we can generalize the update rule in (7) to the form:

$$
W _ {t} = \arg \min _ {W \in \mathcal {W}} \underbrace {\sum_ {i = 1} ^ {t} \widehat {\ell_ {i}} (W ; \mathbf {k} _ {i} , \mathbf {v} _ {i})} _ {\text {Attentional Bias}} + \underbrace {\frac {1}{\eta_ {t}} \mathcal {R} _ {t} (W)} _ {\text {Memory Stability}}. \tag {FTRL Viewpoint}
$$

In this update rule, the term $\sum_{i=1}^{t} \widehat{\ell}_i(W; \mathbf{k}_i, \mathbf{v}_i)$ aims at memorizing the tokens at test time, while the term $\mathcal{R}_t(W)$ regularizes the learning dynamics and takes the size of the memory into account when updating it with new incoming data. 
Choosing different loss functions $\widehat{\ell}_i(W; x_i)$ and the regularization term $\frac{1}{\eta_t} \mathcal{R}_t(W)$ can lead to different algorithms such as (online) gradient descent or mirror descent. In this generalization, $\eta_t$ to can be data-dependent. Moreover, we will allow imposing constraint $\mathcal{W}$ on the choice $W$ . + +# 3.3 Viewpoint 2: Learning the Latest Token While Retaining Previous Information + +Another way to interpret the update rule (5) is to view it as learning from the latest key-value pair $(\mathbf{k}_i, \mathbf{v}_i)$ (via using its gradient or surprise metric), while staying close to the previous state $W_{t-1}$ to retain the previously memorized tokens. Formally, (5) is equivalent to + +$$ +W _ {t} = \arg \min _ {W} \left\langle W - W _ {t - 1}, \nabla \ell (W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}) \right\rangle + \frac {1}{2 \eta_ {t}} \left\| W - W _ {t - 1} \right\| _ {2} ^ {2} +$$ + +The first term locally approximates $\ell(W; \mathbf{k}_t, \mathbf{v}_t)$ around the previous state $W_{t-1}$ , while the last term regularizes deviations from $W_{t-1}$ . This form can generalize to + +$$ +W _ {t} = \arg \min _ {W \in \mathcal {W}} \underbrace {\widetilde {\ell_ {t}} (W ; \mathbf {k} _ {t} , \mathbf {v} _ {t})} _ {\text {A t t e n t i o n a l B i a s}} + \underbrace {\operatorname {R e t} _ {t} (W , W _ {t - 1})} _ {\text {R e t e n t i o n}}, \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \text {(L e a r n i n g - R e t a i n i n g V i e w p o i n t)} +$$ + +where the term $\widetilde{\ell_t} (W;\mathbf{k}_t,\mathbf{v}_t)$ is an approximation of $\ell (W;\mathbf{k}_t,\mathbf{v}_t)$ and minimizing it corresponds to Learning from the new concepts $(\mathbf{k}_t,\mathbf{v}_t)$ . The second term $\mathrm{Ret}_t(W,W_{t - 1})$ regularizes the changes in $W$ to make the learning dynamics stable and to retain previously learned knowledge. 
This Retention function may have local and global components: + +$$ +\operatorname {R e t} _ {t} \left(W, W _ {t - 1}\right) = \underbrace {\frac {1}{\eta_ {t}} \mathrm {D} _ {t} \left(W , W _ {t - 1}\right)} _ {\text {L o c a l R e t e n t i o n}} + \underbrace {\frac {1}{\alpha_ {t}} \mathrm {G} _ {t} \left(W\right)} _ {\text {G l o b a l R e t e n t i o n}}. +$$ + +Here, the term $\mathrm{D}_t(W, W_{t-1})$ , which is a premetric that controls the deviations from $W_{t-1}$ , aims at retaining previously learned knowledge. The coefficient $\eta_t$ can be viewed as a meta in-context learning rate, where larger values of $\eta_t$ leads to learning more from new concepts, while allowing higher forgetting of previously learned concepts. The second term is a global retention that controls the change of the memory with respect to its size. The special instances of the above viewpoint (e.g., without global retention, with implicit closed-form solution, and/or with limited memory structure) have been the motivation behind some of the recent studies such as Liu et al. (2024a). + +# 3.4 Further Discussions on the Two Viewpoints + +The (FTRL Viewpoint) and (Learning-Retaining Viewpoint) are connected through the lens of online optimization. For example, as discussed above, by choosing linear approximation of the loss and quadratic regularization/retention, they can both cover online gradient descent update in (5) as a special case. One straightforward way to make the connection explicit is by defining the premetric $\mathrm{D}_t(W;W^{\prime})$ based on the previous loss functions and the regularization, as described in Proposition 3.2 below: + +Proposition 3.2. Let $\eta_t = \eta$ and define $h_t(W) \coloneqq \sum_{i=1}^{t-1} \widehat{\ell}_i(W; \mathbf{k}_i, \mathbf{v}_i) + \frac{1}{\eta} R(W)$ . 
Assume $\mathcal{W} = \mathbb{R}^d$ and the function $h_t(W)$ is strictly convex in $W$ and let $\mathcal{D}_h(\cdot, \cdot)$ be the Bregman divergence defined by function $h(\cdot)$ , i.e., $\mathcal{D}_h(W, W') = h(W) - h(W') - \langle \nabla h(W'), W - W' \rangle$ . Set $Ret_t(W, W') = \mathcal{D}_h(W, W')$ and $\widetilde{\ell}_t(W; x_t) = \widehat{\ell}_t(W; x_t)$ in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint). + +We provide the proof in Appendix B. The above proposition shows that (Learning-Retaining Viewpoint) can also explain the approaches obtained by (FTRL Viewpoint), under some mild assumptions. Hence, (Learning-Retaining Viewpoint) may be seen as a more general version. This is why we focus on this viewpoint in most of our derivations in the next sections. + +Remark 3. Given the above viewpoint, we can see that even by using additional global regularization there is no memory erasing or forgetting process (a common term in modern architectures (Behrouz et al. 2024c; Yang et al. 2024a)) but the model might decide to not retain the past state of the memory. Interestingly, this observation also matches the human memory process, where brain does not erase memories but they might become inaccessible due to retrieval failures (Robertson 2002). Therefore, instead of calling it a forget gate, later on, we use "Retention Gate" to refer to this term. + +Remark 4. As we discuss in Section 4 and summarize in Table 1, most existing modern sequence models are optimizing associative memory objective (attentional bias in Equation 4) using gradient descent. Therefore, to provide further intuition about the connection of existing sequence models as well as their online learning interpretations, we discuss the above two viewpoints that are limited to gradient descent-based update rules. 
Our initial definition of attentional bias and associative memory in Equation 4, however, is broader and can be optimized by any optimization algorithm (e.g., even Newton's method, or non-parametric solutions). + +# 4 MirAs: Learning to Memorize with Robust and Expressive Memory + +Building upon our definition of associative memory, attentional bias, and previous viewpoints, we present MIRAs framework that not only accurately unifies existing backbone architectures but it also provides insights on how to design the next generation of sequence models. As discussed earlier in Section 3, learning an associative memory can be interpreted as a meta-learning task, in which the associative memory learns how to compress and store data into its parameters at test time. The architecture of the memory in such tasks is particularly important as in longer contexts, the expressivity of the memory structure can limit its ability to learn the underlying patterns. Therefore, the first choice to design a sequence model is the structure of the memory. Given the structure of the memory, parameterized by a set of parameters $W$ , as discussed earlier, we aim to minimize a loss function $\ell(W; \cdot, \cdot)$ with a retention regularizer $\mathrm{Ret}(\cdot)$ via a learning algorithm (e.g., gradient descent). Accordingly, MIRAs requires four design choices: + +1. Memory Structure: This choice specifies the architecture of the memory. For example, this architecture can be a vector, a linear function, a Multilayer Perceptron (MLP) layer, or even more complex structures. We may restrict the choice of $W$ to be within a certain region, e.g., $W$ to lie within an $L_{2}$ ball to avoid infinite values or unstable training. +2. Attentional Bias: A key choice is the attentional bias objective $\mathcal{L}(\cdot)$ in Equation 4. 
We can even consider different approximations of the loss function, (e.g., $\widehat{\ell} (\cdot ,\cdot)$ in (FTRL Viewpoint) or $\widetilde{\ell} (\cdot ,\cdot)$ in (Learning-Retaining Viewpoint)). The choice of attentional bias determines how memory memorizes the context, maps the inputs, and prioritizes the events. +3. Memory Stability and Retention: Another key choice is the retention regularizer $\mathcal{R}(\cdot)$ (e.g., $\mathcal{R}_t(\cdot)$ in (FTRL Viewpoint) and $\mathrm{Ret}_t(\cdot)$ in (Learning-Retaining Viewpoint)). In parametric setups, this choice balances learning with retention of past state. An effective retention gate is key to the good performance in long context tasks. +4. Memory Algorithm: Finally, this choice specifies the learning algorithm that we use to optimize the memory objective. One may use gradient descent, gradient descent with momentum, or any other algorithm (including finding non-parametric solutions). + +The above choices are major design choices for designing backbone sequence models in neural architectures. There are, however, minor decisions that can distinguish models; i.e., data-dependent or independent parameters, scalar or channel-wise learning rate/retaining gate, etc. Next, we discuss the overview of how existing architectures fit into MIRAS framework. + +RNNs with Hebbian Rule. The first generation of modern recurrent architectures (e.g., Linear attention (Katharopoulos et al. 2020), RetNet (Sun et al. 2023), Mamba (Gu et al. 2024), and GLA (Yang et al. 2024b)) are based on Hebbian-like (e.g., gated Hebbian) learning rule (Hebb 2005). We let attentional bias be the dot product similarity. 
That is, given a memory $\mathcal{M} \in \mathbb{R}^{d \times n}$ and $\mathbf{k}, \mathbf{v} \in \mathbb{R}^d$ , we define $\tilde{\ell}_t \coloneqq -2\langle \mathcal{M}_t \mathbf{k}_t, \mathbf{v}_t \rangle$ and local retention as $\mathrm{Ret}_t(\mathcal{M}, \mathcal{M}_{t-1}) = \| \mathcal{M}_t - \alpha \mathcal{M}_{t-1} \|_F^2$ . Using Equation Learning-Retaining Viewpoint and gradient descent as the optimizer (i.e., memory learning algorithm), the memory update rule is: + +$$ +\mathcal {M} _ {t} = \alpha \mathcal {M} _ {t - 1} + \mathbf {v} _ {t} \mathbf {k} _ {t} ^ {\top}. \tag {8} +$$ + +When (1) $\alpha = 1$ , memory update is equivalent to Linear Attention (LA) (Katharopoulos et al. 2020); (2) $\alpha \in \mathbb{R}$ is a learnable parameter, resulting architecture is either lightening attention ( $n > 1$ ) (Li et al. 2025) or RetNet ( $n = 1$ ) (Sun et al. 2023); and (3) $\alpha_{t} \in \mathbb{R}$ are data-dependent learnable parameters, resulting sequence model is Mamba2 (Dao et al. 2024). + +RNNs with Delta Rule. To improve the memory management and to enhance the memory capacity of the above group, several studies suggest using delta rule (Neil et al. 2017; Schlag et al. 2021) as the learning algorithm in recurrent neural networks (e.g., DeltaNet (Schlag et al. 2021), Longhorn (Liu et al. 2024a), and RWKV7 (Peng et al. 2025b)). In this part, we recall that where $\mathcal{M} \in \mathbb{R}^{d \times n}$ , delta rule is equivalent to optimizing MSE objective $\| \mathcal{M}_t \mathbf{k}_t - \mathbf{v}_t \|_2^2$ with $\mathrm{Ret}_t(\mathcal{M}, \mathcal{M}_{t-1}) = \| \mathcal{M}_t - \alpha \mathcal{M}_{t-1} \|_F^2$ as local retention, and stochastic gradient descent as optimizer: ( $\eta_t$ is defined in Equation Learning-Retaining Viewpoint) + +$$ +\mathcal {M} _ {t} = \alpha \left(\mathbf {I} - \eta_ {t} \mathbf {k} _ {t} \mathbf {k} _ {t} ^ {\top}\right) \mathcal {M} _ {t - 1} + \mathbf {v} _ {t} \mathbf {k} _ {t} ^ {\top}. 
\tag {9} +$$ + +When (1) $\alpha = 1$ , memory update is equivalent to DeltaNet (Schlag et al. 2021); and (2) $\alpha_{t} \in \mathbb{R}^{m}$ are data-dependent learnable parameters, resulting sequence model is either Gated DeltaNet (Yang et al. 2024a) ( $m = 1$ ), or RWKV7 (Peng et al. 2025b) ( $m = d$ ). Therefore, RNNs with delta rule are special instances of MIRAS. + +Beyond Delta Rule. As discussed earlier, while delta rule with its value replacement strategy is more powerful than Hebbian-like learning rules, it suffers from theoretical limitations (Irie et al. 2023) and achieves moderate performance in practice (Yang et al. 2024c). Therefore, several studies have focused on update rules beyond delta rule. Recently, Titans (Behrouz et al. 2024c) suggests using non-linear MSE objective of $\| \mathcal{M}_t(\mathbf{k}_t) - \mathbf{v}_t\| _2^2$ with both local and global retention of $\mathrm{D}_t = \| W_t - W_{t - 1}\| _F^2$ and $\mathrm{G}_t = \| W_t\| _2^2$ and optimize it with gradient descent with momentum $^2$ . Therefore, Titans-LMM is a special instance of MIRAs, where we use the abovementioned attentional bias and retention regularizations, and gradient descent with momentum as the optimizer. + +Another example of such models is Mesa-layer, in which the model uses $\sum_{i=1}^{t} \|\mathcal{M}_{t}(\mathbf{k}_{i}) - \mathbf{v}_{i}\|_{2}^{2}$ as the attentional bias objective with $\|\mathcal{M}_{t}\|_{2}^{2}$ as the retention regularization. Since these models use Newton's method to optimize such an objective, they provide a more expressive update rule than delta rule. We further discuss a set of new learning algorithms beyond delta rule in Section 5. + +Attention. As discussed by Sun et al. (2024), softmax attention is a non-parametric solution of $\ell_2$ -MSE loss function (i.e., $\| W\mathbf{k} - \mathbf{v}\| _2^2$ ) with Nadaraya-Watson estimator. 
Therefore, softmax attention is an instance of MIRAS, when we find the non-parametric solution to the MSE loss with Nadaraya-Watson estimator, without retention. + +# 5 Beyond Existing Attentional Biases and Retention Gates + +As discussed in the previous section, existing work focuses only on linear/quadratic choices for the attentional bias or retention gate. In particular, the loss function $L(\mathcal{M}(\mathbf{k}_t),\mathbf{v}_t)$ is defined as $L(\mathcal{M}(\mathbf{k}_t),\mathbf{v}_t) = c_t\| \mathcal{M}(\mathbf{k}_t) - \mathbf{v}_t\|^2$ for some (learnable) constant $c_{t}$ in prior work. Also the regularization term $R_{t}(W)$ or the parametric $D_{t}$ is considered as a quadratic/linear function. In addition, almost all prior work considers $W$ to be the entire $\mathbb{R}^d$ space. However, in general there could be various choices for all the three aforementioned design choices. To illustrate the potential and flexibility of our designed framework, here, we review some of the potential design choices for attentional bias and retention gate in MirAS. For the sake of clarity, we discuss all these attentional bias and memory retention gates based on using gradient descent as the optimizer, and so based on the provided two view points. However, these attentional bias objectives and retention regularizers can be directly used in Equation 4 and optimized by using any other optimization algorithms, resulting in different update rules. + +# 5.1 Alternative Attentional Biases + +Variant 1: $\ell_p$ -Attentional Bias. As discussed in the main body, attentional bias defines the "similarity metric" and measures how well memory can recall the value, given its corresponding key. Although $\ell_2$ regression loss often is a natural choice, it is sensitive to noise in the data. A natural extension is to use $\ell_p$ -norm class of objectives. 
That is, let $\mathcal{M}$ be the memory, $\mathbf{k}$ be the keys, and $\mathbf{v}$ be the values, we define $\ell_p$ -attentional bias as: + +$$ +\mathcal {L} \left(\mathcal {M} \left(W, \mathbf {k} _ {t}\right); \mathbf {v} _ {t}\right) = \| \mathcal {M} \left(\mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {p} ^ {p}, \tag {10} +$$ + +where $p \in \mathbb{R}^{\geq 1}$ and $\| . \|_p$ is the $p$ -norm. Although depending on the distribution of the data, we might want to use different values of $p$ (see Section 6), different values of $p$ can result in memory architectures with interesting properties. For the sake of simplicity, let memory be a matrix, i.e., $W \in \mathbb{R}^{m \times d}$ and $\mathcal{M}(W, \mathbf{k}_t) = W\mathbf{k}_t$ , the closed form can be derived as: + +$$ +W _ {t} = W _ {t} - \eta_ {t} \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) = W _ {t} - p \eta_ {t} \left(\operatorname {S i g n} \left(W \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \odot | W \mathbf {k} _ {t} - \mathbf {v} _ {t} | ^ {p - 1}\right) \mathbf {k} _ {t} ^ {\top}. \tag {11} +$$ + +Let $p = 1$ , the recurrence is simplified as: + +$$ +W _ {t} = W _ {t} - \eta_ {t} \operatorname {S i g n} \left(W _ {t} \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \mathbf {k} _ {t} ^ {\top}, \tag {12} +$$ + +which means that the memory has only two values of $-1$ and $1$ . We call this variation value-less associative memory, in which we store entities (keys) but map them into two extreme class of $-1$ and $+1$ . + +Remark 5. One of the critical challenges to use the above update rule is in the backpropagation process, in which $\operatorname{Sign}(\cdot)$ and $|\cdot|$ are non-differentiable and so might cause unstable training. To overcome this issue, we use $\operatorname{Sign}(x) \approx \tanh(\alpha x)$ , and $|x| = \sqrt{x^2 + \epsilon}$ , as the smooth approximators of these functions. 
+ +One simple interpretation for such behavior (i.e., value-less memory) is similar to the coping mechanism in humans (Loftus 1993), in which the memory does not store the values for extreme events. This interpretation of protective memory in extreme events motivates our next variant. + +Variant 2: Huber Loss: Memory with Coping Mechanism. While $\ell_2$ -norm objective is a common choice for many statistical and machine learning tasks, it is known to be sensitive to outliers and extreme samples. This sensitivity extends to the use of $\ell_2$ loss for attentional bias. To address this and drawing motivation from robust regression literature, we suggest utilizing the Huber loss-type (Hastie et al. 2009; Huber 1992) as the attentional bias, thereby reducing the negative impact of the outlier data on the memory learning process. + +We can apply Huber-type loss in three different ways: The first approach is to define the summation of the Huber loss across different coordinates as the total loss, i.e., + +$$ +\ell (W; \mathbf {k} _ {t}, \mathbf {v} _ {t}) = \sum_ {j} \mathcal {H} (\mathcal {M} (W, \mathbf {k} _ {t}) _ {j} - \mathbf {v} _ {t, j}), +$$ + +where $\mathcal{M}(W,\mathbf{k}_t)_j$ and $\mathbf{v}_{t,j}$ denote the $j$ -th coordinate of $\mathcal{M}(W,\mathbf{k}_t)$ and $\mathbf{v}_t$ respectively. The function $\mathcal{H}(\cdot):\mathbb{R}\mapsto \mathbb{R}$ is the Huber loss defined as + +$$ +\mathcal {H} (a) = \left\{ \begin{array}{l l} \frac {1}{2} a ^ {2} & \text {i f} | a | \leq \delta \\ \delta \left(| a | - \frac {1}{2} \delta\right) & \text {i f} | a | > \delta . \end{array} \right. \tag {13} +$$ + +Utilizing this attentional bias can lead to various memory update rules. 
For example, for the matrix form memory $\mathcal{M}(W,\mathbf{k}_t) = W\mathbf{k}_t$ , the update rule is given by + +$$ +W _ {t} = W _ {t - 1} - \eta_ {t} \left[ \left(\left(W \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \mathbf {k} _ {t} ^ {T}\right) \odot \left(\mathbf {I} \left(\left| W \mathbf {k} _ {t} - \mathbf {v} _ {t} \right| \leq \delta_ {t}\right) \mathbf {1} ^ {\top}\right) + \left(\delta_ {t} \operatorname {S i g n} \left(W \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \mathbf {k} ^ {\top}\right) \odot \left(\mathbf {I} \left(\left| W \mathbf {k} _ {t} - \mathbf {v} _ {t} \right| > \delta_ {t}\right) \mathbf {1} ^ {\top}\right) \right] \tag {14} +$$ + +In this formulation, the parameter $\delta_t$ decides the type of the memory used for each block of memory ( $\ell_2$ -norm objective or value-less) based on the context, making the memory more robust to outliers. + +The second approach is to define the Huber-type loss based on the $\ell_2$ loss over all coordinates, i.e., + +$$ +\ell (W; \mathbf {k} _ {t}, \mathbf {v} _ {t}) = \mathcal {H} (\| \mathcal {M} (W, \mathbf {k} _ {t}) - \mathbf {v} _ {t} \| _ {2}). +$$ + +For simplicity of derivations, assume matrix memory $M(W,\mathbf{k}_t) = W\mathbf{k}_t$ . Then using gradient descent for updating memory leads the memory update rule + +$$ +W _ {t} = W _ {t - 1} - \eta_ {t} \left\{ \begin{array}{l l} \left(\mathcal {M} \left(W _ {t - 1}, \mathbf {k} _ {t}\right) - \mathbf {v} _ {t}\right) \mathbf {k} _ {t} ^ {T} & \text {i f} \| \mathcal {M} \left(W _ {t - 1}, \mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {2} \leq \delta_ {t}, \\ \delta_ {t} \frac {\left(\mathcal {M} \left(W _ {t - 1} , \mathbf {k} _ {t}\right) - \mathbf {v} _ {t}\right)}{\| \mathcal {M} \left(W _ {t - 1} , \mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {2}} \mathbf {k} _ {t} ^ {T} & \text {O t h e r w i s e .} \end{array} \right. 
\tag {15} +$$ + +Again, in the form (15), the parameter $\delta_t$ decides the type of the memory used ( $\ell_2$ -norm objective or normalized version) based on the context, making the memory more robust to outliers. + +Finally, in the third approach, we present a smooth mixture method, in which the memory decides if for an incoming data it is better to use $\ell_2$ or $\ell_1$ attentional bias: + +$$ +W _ {t} = W _ {t - 1} - \left\{ \begin{array}{l l} \eta_ {t} \nabla \ell_ {2} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) & \text {i f} \| \mathcal {M} (\mathbf {k} _ {t}) - \mathbf {v} _ {t} \| \leq \delta_ {t}, \\ \eta_ {t} \delta_ {t} \nabla \ell_ {1} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) & \text {O t h e r w i s e .} \end{array} \right. \tag {16} +$$ + +The role of parameter $\delta_t$ is the same as above. + +Variant 3: Memory Robust to Value Shifts. Following the robustness requirement discussed in the previous section, we aim to design a memory mechanism that exhibits resilience against small shifts in the value parameter. A natural approach in this context is to employ a robust optimization formulation. Specifically, we define the loss function as the worst-case $\ell_2$ distance between the predicted memory output and the perturbed true value: + +$$ +\mathcal {L} \left(\mathcal {M} \left(W, \mathbf {k} _ {t}\right); \mathbf {v} _ {t}\right) = \max _ {\| \delta \mathbf {v} _ {t} \| _ {2} \leq \Delta} \frac {1}{2} \| \mathcal {M} \left(W, \mathbf {k} _ {t}\right) - \left(\mathbf {v} _ {t} + \boldsymbol {\delta} \mathbf {v} _ {t}\right) \| _ {2} ^ {2}. \tag {17} +$$ + +This formulation seeks the memory parameters $W$ that perform well even under the adverse local perturbation of the true value $\mathbf{v}_t$ within an $\ell_2$ ball of radius $\Delta$ . To solve the maximization problem in (17), we find the optimal perturbation $\delta \mathbf{v}_t^*$ . 
By solving this problem with respect to $\delta \mathbf{v}_t$ , we arrive at: + +$$ +\delta \mathbf {v} _ {t} ^ {*} = \Delta \frac {- \mathcal {M} (W , \mathbf {k} _ {t}) + \mathbf {v} _ {t}}{\| \mathcal {M} (W , \mathbf {k} _ {t}) - \mathbf {v} _ {t} \| _ {2}} +$$ + +Substituting this optimal perturbation back into the loss function (17), we obtain the robust loss: + +$$ +\mathcal {L} \left(\mathcal {M} \left(W, \mathbf {k} _ {t}\right); \mathbf {v} _ {t}\right) = \frac {1}{2} \| \mathcal {M} \left(W, \mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {2} ^ {2} + \Delta \| \mathcal {M} \left(W, \mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {2} + \frac {1}{2} \Delta^ {2}. +$$ + +This robust loss function is a combination of the standard $\ell_2$ loss and a term proportional to the $\ell_2$ norm of the error, scaled by the robustness parameter $\Delta$ . The value of $\Delta$ thus controls the trade-off between fitting the nominal data and ensuring robustness against value perturbations. + +For simplicity of the derivations, let us consider a constant value for $\Delta$ , an Euclidean retention gate $\mathrm{Ret}_t(W,W_{t - 1}) = \| W - W_{t - 1}\|^2$ , and an attentional bias term $\widetilde{\ell} (W;\mathbf{k}_t,\mathbf{v}_t) = \langle W - W_{t - 1},\nabla \ell (W_{t - 1};\mathbf{k}_t,\mathbf{v}_t)\rangle$ . Furthermore, to simplify the memory operation, we assume a linear matrix memory model $\mathcal{M}(W,\mathbf{k}_t) = W\mathbf{k}_t$ . 
Under these assumptions, we can derive the memory update mechanism using gradient descent on the robust loss: + +$$ +W _ {t} = W _ {t - 1} - \eta \left(\left(\mathcal {M} \left(W _ {t - 1}, \mathbf {k} _ {t}\right) - \mathbf {v} _ {t}\right) \mathbf {k} _ {t} ^ {\top} + \Delta \frac {\mathcal {M} \left(W _ {t - 1} , \mathbf {k} _ {t}\right) - \mathbf {v} _ {t}}{\| \mathcal {M} \left(W _ {t - 1} , \mathbf {k} _ {t}\right) - \mathbf {v} _ {t} \| _ {2}} \mathbf {k} _ {t} ^ {\top}\right) +$$ + +In this update rule, the parameter $\Delta$ , which governs the influence of the robustness term, can also be treated as a learnable parameter, allowing the model to adapt its robustness based on the observed data. + +# 5.2 Alternative Retention Gates + +Variant 1: Memorization Over A Scaled Probability Simplex Via $f$ -Divergence. A common technique in learning to prevent numerical instabilities and exploding values is to restrict the search space to a bounded domain. Following this principle, to avoid numerical instabilities, we can constrained the variable $W_{t}$ to lie within a (scaled) probability simplex. In other words, we can restrict the state to lie in the constraint set + +$$ +\mathcal {W} = \{W \mid \| W \| _ {1} = c \text {a n d} W _ {j l} \geq 0, \forall j, l \}. +$$ + +In this set, each matrix $W$ can be viewed as a measure. Thus, in (Learning-Retaining Viewpoint), we can utilize divergences over measures to define our premetric. For example, we can use $f$ -divergence measure (Polyanskiy et al. 2025, Def 4.9), (Csiszar 1967) to define $\mathrm{D}_t(\cdot, \cdot)$ . More specifically, let $f(\cdot)$ be a smooth strictly convex function from $\mathbb{R}^+$ to $\mathbb{R}$ with $f(1) = 0$ . Then, we can define the $f$ -divergence between $W$ and $W'$ as + +$$ +\mathrm {D} _ {t} (W, W ^ {\prime}) = \sum_ {j l} W _ {j l} ^ {\prime} f \left(\frac {W _ {j l}}{W _ {j l} ^ {\prime}}\right). 
+$$ + +It is known that $f$ -divergence is zero if and only if $W = W'$ ; see Polyanskiy et al. 2025, Theorem 2.3. Using the above premetric as the retention gate and setting $\widetilde{\ell}(W; \mathbf{k}_t, \mathbf{v}_t) = \langle W - W_{t-1}, \nabla \ell(W_{t-1}; \mathbf{k}_t, \mathbf{v}_t) \rangle$ in (Learning-Retaining Viewpoint), we get the update rule + +$$ +W _ {t} = W _ {t - 1} \odot g \left(- \zeta_ {t} - \eta_ {t} \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)\right). \tag {18} +$$ + +Here $g(\cdot)$ is the inverse of the mapping $f'$ , i.e., $g(f'(\tau)) = \tau$ , $\forall \tau$ ; the operator $\odot$ denotes the Hadamard (elementwise) product, and $\zeta_t$ should be chosen such that $\| W_t\|_1 = c$ . Notice that since the function $f(\cdot)$ is strictly convex and smooth, its derivative is strictly increasing and hence $g(\cdot)$ is well defined. Conversely, for any strictly monotone function $g(\cdot)$ , we can find its inverse function $g^{-1}$ (which is strictly increasing) and define $f(\tau) = \mathrm{const} + \int_{\tau' = 0}^{\infty}g^{-1}(\tau')d\tau'$ . The term const should be chosen such that $f(1) = 0$ . Then the update rule in (18) can be interpreted by the $f$ -divergence regularization, as explained above. Therefore, one can directly choose a continuous monotonically increasing function $g(\cdot)$ and use (18) for memory update. + +Specializing to KL divergence. Let us further make the above update rule explicit by using special function $f$ . If we choose $f(\tau) = \tau \ln(\tau)$ , then the $f$ -divergence becomes the widely used KL divergence measure $D_t(W, W_{t-1}) = \sum_{jl} W_{jl} \log \left( \frac{W_{jl}}{(W_t)_{jl}} \right)$ . In addition, we can also utilize the Shannon entropy as the global retention by regularizing deviations from uniform distribution, i.e., $G_t(W) = \sum_{jl} W_{jl} \log (W_{jl})$ . 
Combining these choices of the local and global retention gates, we obtain the overall retention gate + +$$ +\operatorname {R e t} _ {t} (W, W _ {t - 1}) = \frac {1}{\eta_ {t}} \sum_ {j l} W _ {j l} \log \left(\frac {W _ {j l}}{\left(W _ {t}\right) _ {j l}}\right) + \frac {1}{\alpha_ {t}} \sum_ {j l} W _ {j l} \log \left(W _ {j l}\right) +$$ + +Choosing the attentional bias $\widetilde{\ell}(W; \mathbf{k}_t, \mathbf{v}_t) = \langle W - W_{t-1}, \nabla \ell(W_{t-1}; \mathbf{k}_t, \mathbf{v}_t) \rangle$ and the above retention gate will lead to the update rule + +$$ +W _ {t} = \arg \min _ {W} \left\langle W - W _ {t - 1}, \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) \right\rangle + \frac {1}{\eta_ {t}} \sum_ {j l} W _ {j l} \log \left(\frac {W _ {j l}}{\left(W _ {t}\right) _ {j l}}\right) + \frac {1}{\alpha_ {t}} \sum_ {j l} W _ {j l} \log \left(W _ {j l}\right) \tag {19} +$$ + +$$ +\text {s . t .} \quad \sum_ {j l} W _ {j l} = c, W _ {j l} \geq 0, \forall j l \tag {20} +$$ + +Attaching the Lagrange multiplier to the first constraint, the KKT conditions imply + +$$ +\left(\nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)\right) _ {j l} + \left(\frac {1}{\eta_ {t}} + \frac {1}{\alpha_ {t}}\right) \left(1 + \log W _ {j l}\right) - \frac {1}{\eta_ {t}} \log \left(\left(W _ {t - 1}\right) _ {j l}\right) + \mu_ {t} = 0, \quad \forall j, l +$$ + +where $\mu_t$ should be chosen such that $\sum_{jl} W_{jl} = c$ . 
Rearranging the terms and defining $\lambda_t = \frac{1 / \alpha_t}{1 / \alpha_t + 1 / \eta_t}$ , $\eta_t' = \frac{1}{1 / \alpha_t + 1 / \eta_t}$ , we get the update rule + +$$ +W _ {t} \leftarrow c \operatorname {S o f t m a x} \left(\left(1 - \lambda_ {t}\right) \log \left(W _ {t - 1}\right) - \eta_ {t} ^ {\prime} \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)\right) \tag {21} +$$ + +where $\lambda_t \in (0,1)$ and $\eta' \in \mathbb{R}^+$ are the parameters that can be learned during training. The Softmax operator ensures that the output lies in the set $\mathcal{W}$ . + +Notice that while all above calculations are done for a matrix $W$ , similar update rule holds for other forms of parameters such as when $W$ is a neural network (or when the parameter $W$ is normalized per slice). + +Variant 2: Elastic Net Regularization: Hard and Soft Forgetting. Elastic net is a powerful and popular tool in regression analysis to balance the feature selection capabilities of LASSO (Tibshirani 1996) and bias reduction properties of Ridge regression (Hilt et al. 1977; Hoerl et al. 1970). It has been widely used in different applications due to its ability to handle high-dimensional data and mitigate the effects of multicollinearity. Given this success, a natural question is what happens if we use this regularization scheme in our context. + +Let us start based on (Learning-Retaining Viewpoint) to design our memorization scheme. In (Learning-Retaining Viewpoint), we discussed that the loss function $\widetilde{\ell_t} (W;\mathbf{k}_t,\mathbf{v}_t)$ is an approximation of the original function $\ell (\cdot)$ , measuring our goodness-of-fit. Regularizing this loss with elastic net regularizer, we obtain the approximation + +$$ +\widetilde {\ell} _ {t} (W; \mathbf {k} _ {t}, \mathbf {v} _ {t}) = \langle W - W _ {t - 1}, \nabla \ell (W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}) \rangle . 
+$$ + +with a global retention of $\mathrm{G}_t(W) = \frac{1}{2\beta} \| W\| _2^2 +\frac{1}{\alpha}\| W\| _1$ . To fully specify the update rule of (Learning-Retaining Viewpoint), we also need to specify the premetric functions $\mathrm{D}_t(\cdot ,\cdot)$ . For the sake of keeping the update rule simple (and parallelizable), we can choose + +$$ +\mathrm {D} _ {t} (W, W _ {t - 1}) = \frac {1}{2} \| W - W _ {t - 1} \| _ {2} ^ {2}. +$$ + +These choices of the attentional bias and retention gate leads to the following update rule: + +$$ +W _ {t} = \mathcal {S} _ {Y} \left(\lambda W _ {t - 1} - \zeta \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)\right), \tag {22} +$$ + +where $\gamma = \frac{\eta\beta}{\alpha(\eta + \beta)}$ , $\lambda = \frac{\beta}{\beta + \eta}$ , $\zeta = \eta\lambda$ , and $S_{\gamma}$ is the soft thresholding operator, applied element-wise. For each element, this operator is defined as + +$$ +\mathcal {S} _ {\gamma} (z) = \operatorname {s i g n} (z) \max \left\{0, | z | - \gamma \right\}. +$$ + +In other words, for large values of $z$ , $S_{\gamma}(z)$ makes $z$ closer to zero by $\gamma$ amount. If it is already in the $\gamma$ -vicinity of zero, then it makes it zero (hard forget). + +Equation (22) can be viewed as a combination of soft forgetting (obtained by multiplying $W$ by $\lambda \in (0,1)$ , and a hard forgetting (if it is smaller than $\gamma$ ). The hyperparameters $\gamma, \lambda,$ and $\zeta$ can be learned. Notice that since the shrinkage operator is not differentiable, we can approximate it with its smooth approximation. For example, we can use $S_{\gamma}(z) \approx \frac{|z|*\arctan(z / \gamma)}{\pi / 2}$ . + +Variant 3: Elastic Net Regularization: Forgetting via Soft-thresholding. The elastic net regularizer can also be used in the (FTRL Viewpoint). 
In particular, in (FTRL Viewpoint), we can set

$$
\frac {1}{\eta_ {t}} R _ {t} (W) = \frac {1}{\eta} \| W \| ^ {2} + \frac {1}{\alpha} \| W \| _ {1}
$$

and use $\widehat{\ell}(W; x_i) = \langle W - W_{i-1}, \nabla \ell(W_{i-1}; x_i) \rangle$ . Assuming initialization at $W_0 = 0$ , these choices of attentional bias and retention gate lead to the update rules:

$$
A _ {t} = A _ {t - 1} - \eta \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)
$$

$$
W _ {t} = \mathcal {S} _ {\eta / \alpha} (A _ {t}) \tag {23}
$$

Here $S_{\eta /\alpha}(\cdot)$ is the soft-thresholding operator with parameter $\eta /\alpha$ , which can be smoothly approximated as explained in Variant 2.

Variant 4: General $L_{q}$ Memory Stability. Existing work is based on the retention gate choices $\mathrm{D}_t(W, W_{t-1}) = \|W - W_{t-1}\|_F^2$ or $R(W) = \|W\|_2^2$ . However, one can choose other choices of retention gate. For example, in (FTRL Viewpoint), we can choose $L_{q}$ norm as the regularizer $R(W)$ . More specifically, for $1 < q \leq 2$ , we can set

$$
\frac {1}{\eta_ {t}} R (W) = \frac {1}{2 \eta (q - 1)} \| W \| _ {q} ^ {2}.
$$

Using this retention gate and choosing $\widehat{\ell_i} (W;\mathbf{k}_i,\mathbf{v}_i) = \langle W - W_{i - 1},\nabla \ell (W_{i - 1};\mathbf{k}_i,\mathbf{v}_i)\rangle$ in (FTRL Viewpoint), leads to the update rule $W_{t} = -\eta \frac{A_{t}}{\|A_{t}\|_{p}^{p - 2}}$ , where $p = \frac{q}{q - 1}$ and $A_{t} = \sum_{i = 1}^{t}\nabla \ell (W_{i - 1};\mathbf{k}_{i},\mathbf{v}_{i})$ ; see Shalev-Shwartz et al. 2012, Section 2.6. Here, $\odot$ denotes the Hadamard (element-wise) product and $|\cdot |$ is the element-wise absolute value operator. Assuming $W_0 = 0$ , this update rule can be recursively written as:

$$
A _ {t} = A _ {t - 1} - \eta \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right), \quad \text {a n d} \quad W _ {t} = \frac {A _ {t}}{\| A _ {t} \| _ {p} ^ {p - 2}}. 
+
$$

Variant 5: Bregman Divergence as Retention Gate. Another natural choice is to use Bregman divergence as retention gate, leading to mirror descent-type algorithms. In particular, given a smooth strictly convex function $f(\cdot): \mathbb{R} \mapsto \mathbb{R}$ , we can define the function $F(W) = \sum_{jl} f(W_{jl})$ . Based on this choice of function $F$ , we define the Bregman divergence

$$
D _ {t} (W, W ^ {\prime}) = F (W) - F \left(W ^ {\prime}\right) - \langle F ^ {\prime} \left(W ^ {\prime}\right), W - W ^ {\prime} \rangle
$$

as our premetric function. Utilizing this retention gate and choosing $\widetilde{\ell}_t(W;\mathbf{k}_t,\mathbf{v}_t) = \langle W - W_{t - 1},\nabla \ell (W_{t - 1};\mathbf{k}_t,\mathbf{v}_t)\rangle$ in (Learning-Retaining Viewpoint), we obtain the update rule

$$
W _ {t} = g \left(- \eta \nabla \ell \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) + F ^ {\prime} \left(W _ {t - 1}\right)\right).
$$

Here, $F'$ is the mapping obtained by applying $f'(\cdot)$ (the derivative of $f$ ) element-wise to all entries of its input matrix argument. The function $g$ is the inverse of the mapping $F'(\cdot)$ , i.e., $g(F'(W)) = W$ .

If we choose $f(\tau) = \frac{\tau^2}{2}$ , then $F'(W)$ becomes the identity mapping and so is $g$ . Therefore, the above update becomes simple gradient descent with no nonlinearity involved in the update rule. However, other choices of $f(\cdot)$ introduce additional nonlinearity in $g(\cdot)$ , which can enhance the expressivity of our memory. For example, we can choose the function $f(\cdot)$ so that its derivative becomes the inverse sigmoid function, i.e., $f'(\tau) = \ln \left( \frac{\tau}{1 - \tau} \right)$ with $f': (0,1) \mapsto \mathbb{R}$ . Since $f'(\cdot)$ is strictly increasing, then the function $f(\cdot)$ (and hence $F(\cdot)$ ) is strictly convex. Therefore, the Bregman divergence is well defined. 
Moreover, the inverse of the function $f'(\cdot)$ becomes the sigmoid function, i.e., $g(\tau) = \sigma(\tau) = \frac{\exp(\tau)}{1 + \exp(\tau)}$ with $g: \mathbb{R} \mapsto (0,1)$ . Then, the update of the memory becomes

$$
W _ {t} = \sigma \left(\ln \left(\frac {W _ {t - 1}}{1 - W _ {t - 1}}\right) - \eta \nabla \ell (W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t})\right),
$$

where $\sigma$ is the sigmoid function operated element-wise on the entries of $W$ , and the division operator $\frac{W_{t - 1}}{1 - W_{t - 1}}$ is also performed element-wise. This update rule guarantees that the elements of $W_t$ remain within the interval $(0, 1)$ .

# 5.3 MIRAs's Variants: MONETA, YAAD, and MEMORA

In the previous section we discussed different potential choices for attentional bias and retention gate to show the generality and the potential of MIRAs. In this section, building upon our framework, we present three novel sequence models, each of which is designed based on a different motivation, and discuss how they can leverage fast parallel training.

MONETA. Given $p,q\in \mathbb{R}^{\geq 1}$ , we design $(p,q)$ -MONETA as the variant of MIRAs as follows: (1) For the choice of memory architecture, we use an MLP with 2 layers with expansion factor of 4 and GELU activation function (Hendrycks et al. 2016). We also use residual connections and layer norm, resulting in $\mathcal{M}(x) = x + \mathsf{LN}(W_1\sigma (W_2x))$ . (2) We choose $\ell_p$ -attentional bias (introduced in Equation 11) for MONETA. (3) For the choice of retention gate, we use the hybrid of $\ell_q$ retention gate $\frac{1}{2(q - 1)}\| W\| _q^2$ (see Section 5.2 for details) and the standard $\ell_2$ regularization $\frac{1}{\beta}\| W\| _2^2$ . (4) Finally, we use gradient descent as the memory learning algorithm. 
The above choices result in the following recurrent formula for the memory module:

$$
A _ {t} = \alpha_ {t} A _ {t - 1} - \eta_ {t} \nabla \ell_ {p} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right), \quad \text {a n d} \quad W _ {t} = \frac {A _ {t}}{\| A _ {t} \| _ {q} ^ {q - 2}}. \tag {24}
$$

Notably the gradient can be calculated using:

$$
\nabla \ell_ {p} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) = p \left(\operatorname {S i g n} \left(W _ {t - 1} \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \odot | W _ {t - 1} \mathbf {k} _ {t} - \mathbf {v} _ {t} | ^ {p - 1}\right) \mathbf {k} _ {t} ^ {\top}. \tag {25}
$$

We use $(p,q) = (3,4)$ .

YAAD. Building upon our discussion on the importance of a robust memory that protects itself from extreme events (tokens), we design YAAD based on the Huber objective. That is, in MirAS, for the choice of memory structure, we follow MONETA and use an MLP with the same architecture as above; for the choice of attentional bias, we use Huber loss (defined in Equation 16); for the choice of retention gate, for the sake of simplicity, we use a combination of local and global retention as $\mathrm{Ret}_t(W,W_{t - 1}) = \frac{1}{2\theta_t}\| W - W_{t - 1}\| _F^2 +\frac{1}{\beta_t}\| W\| _2^2$ , which is equivalent to the "forget gate" mechanism introduced by Behrouz et al. (2024c); and finally, we simply use gradient descent as the memory learning algorithm. Given the above choices, we can write the resulting memory learning process as follows:

$$
W _ {t} = \alpha_ {t} W _ {t - 1} - \left\{ \begin{array}{l l} \eta_ {t} \nabla \ell_ {2} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) & \text {i f} \| \mathcal {M} (\mathbf {k} _ {t}) - \mathbf {v} _ {t} \| \leq \delta_ {t}, \\ \eta_ {t} \delta_ {t} \nabla \ell_ {1} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) & \text {O t h e r w i s e .} \end{array} \right. 
\tag {26} +$$ + +Note that for improving the expressive power, in all architectures, we decouple the learning rate $\eta$ and the retention gate rate $\alpha$ , resulting in an independent parameter $\beta_{t} \in [0,1]^{d}$ . + +![](images/9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg) +Figure 2: Visualization of the MirAs's variant architecture, their hybrid counterpart with SWA, and block design of MirAs layer. + +![](images/e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg) + +![](images/736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg) + +MEMORA. Finally, in MEMORA, we use the idea of elastic net regularization (i.e., hard and soft retention). To this end, in Miras: (1) For the choice of memory architecture, similar to above variants, we use an MLP (the same architecture as the previous variants). (2) For the choice of attentional bias, we use simple $\ell_2$ regression loss. (3) For the choice of retention gate we use KL divergence as in Equation 21. (4) Finally, we optimize the memory using gradient descent, resulting in the following update rule: + +$$ +W _ {t} = \operatorname {S o f t m a x} \left(\alpha_ {t} \log \left(W _ {t - 1}\right) - \eta_ {t} \nabla \ell_ {2} \left(W _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right)\right) \tag {27} +$$ + +# 5.4 Architecture Backbone and Fast Training + +Architectural Backbone. For the architectural backbone, we fully follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a): We replace attention modules with our variants of MIRAs in Llama's macro architecture with MLPs with SwiGLU(. ) activation, rotary positional encodings (RoPE) (Su et al. 2024), and RMSNorm (Zhang et al. 2019). For MIRAs layer block, we follow the recent modern linear recurrent models (Behrouz et al. 2024c; Yang et al. 2024a), and incorporate a 1D depthwise-separable convolution layer (with kernel size of 4) after each of the query, key, and value projections. 
For the sake of training stability, we also use $\ell_2$ normalization to $\mathbf{q}$ and $\mathbf{k}$ . The output of MIRAs layer block is normalized and gated with a linear layer (Mehta et al. 2023). + +Channel-wise Parameters. For learnable parameters of $\eta_t, \delta_t$ and the retention gate of $\alpha_t$ we use channel-wise parametrization, i.e., $\eta_t, \delta_t, \alpha_t \in \mathbb{R}^d$ . While gaining more expressive power, this parametrization results in significant parameter increase. To mitigate this issue, following Peng et al. (2025b), we use low-rank projections to project the input into $\mathbb{R}^k$ and then to $\mathbb{R}^d$ , where $k$ is a hyperparameter (usually 32 or 64). The backbone architecture is illustrated in Figure 2. + +Hybrid Models. We also evaluate the hybrid version of Miras's variants. For hybrid models, we follow the Samba (Ren et al. 2024) architecture, in which we sequentially combine our Miras layer with Sliding Window Attention (SWA). The illustration of hybrid model Figure 2. + +Parallelizable Training. While the design of Miras's variant are theoretically well-motivated, their recurrence is non-linear, potentially making their straightforward training slow for large scales. In this section, we build upon the work of Behrouz et al. (2024c) and Sun et al. (2024) to make the training parallelizable. The main idea is to divide the sequence into + +chunks with size $b$ (usually is 16 or 64) and calculate the gradient for all tokens in the current chunk with respect to the last state of the memory in the previous chunk. That is, we use $\nabla \ell(\mathcal{M}_{t'}; \mathbf{k}_t, \mathbf{v}_t)$ instead of $\nabla \ell(\mathcal{M}_{t-1}; \mathbf{k}_t, \mathbf{v}_t)$ , where $t'$ is the last state in the previous chunk. + +Given the above trick, we can calculate all gradients at once and make the recurrence inside each chunk linear. 
However, to fully take advantage of accelerators, we need to reformulate the process as matrix multiplication. For MONETA, for the sake of clarity, assume $q = 2$ . We follow the same algorithm as Behrouz et al. (2024c) and expand the recurrence as follows: + +$$ +\begin{array}{l} \mathcal {M} _ {t} = \alpha_ {t} \mathcal {M} _ {t - 1} - \eta_ {t} \nabla \ell (\mathcal {M} _ {t - 1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}) \\ = \beta_ {t} \mathcal {M} _ {0} - \sum_ {i = 1} ^ {t} \eta_ {i} \frac {\beta_ {t}}{\beta_ {i}} \nabla \ell \left(\mathcal {M} _ {t ^ {\prime}}; \mathbf {k} _ {i}, \mathbf {v} _ {i}\right), \tag {28} \\ \end{array} +$$ + +where $t' = t - \mathrm{mod}(t, b)$ , and $\beta_{i} = \prod_{j=1}^{i} \alpha_{j}$ . For the sake of clarity, we focus on the first chunk, i.e., $t = b$ and so $t' = 0$ , and explain the process for the case that $\mathcal{M}_t = W_t$ is linear. The process for 2-layer MLPs and other chunks is similar. Using $\ell_p$ loss function, we have: + +$$ +\begin{array}{l} \nabla \ell \left(W _ {0}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) = p \left(\operatorname {S i g n} \left(W _ {0} \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \odot \left| W _ {0} \mathbf {k} _ {t} - \mathbf {v} _ {t} \right| ^ {p - 1}\right) \mathbf {k} _ {t} ^ {\top} \\ \Rightarrow \sum_ {i = 1} ^ {b} \eta_ {i} \frac {\beta_ {b}}{\beta_ {i}} \nabla \ell \left(W _ {0};; \mathbf {k} _ {i}, \mathbf {v} _ {i}\right) = p \mathbf {E} _ {b} \odot \mathbf {B} _ {b} \odot \operatorname {S i g n} \left(W \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \odot \left(\left| W _ {0} \mathbf {K} - \mathbf {V} \right| ^ {p - 1}\right) \mathbf {K} ^ {\top}, \tag {29} \\ \end{array} +$$ + +where $\mathbf{E}_b = \left[\eta_1\quad \eta_2\quad \dots \quad \eta_b\right]$ and $\mathbf{B}_b$ is defined analogously on $\frac{\beta_b}{\beta_i}\mathrm{s}$ . 
For the sake of stability in training, we use $\operatorname{Sign}(x)\approx \tanh (\alpha x)$ and $|x| = \sqrt{x^2 + \epsilon}$ , where $\epsilon >0$ is a small number (i.e., $\epsilon = 1e - 6$ ). As discussed in Equation 24, the case that $q\neq 2$ appears as a normalization term on the memory. Similar to Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024), we do not apply this non-linearity inside each chunk and instead use it at the end of each chunk. + +For YAAD, the process is very similar to the above. We calculate the gradient of both $\ell_1$ and $\ell_2$ loss and use a masking based on $\| \mathcal{M}(\mathbf{k}_t) - \mathbf{v}_t\| \leq \delta_t$ . + +For MEMORA, the update rule has two non-linear parts, i.e., softmax and log, making the model hardly parallelizable. To this end, as discussed above, we use its linear version inside each chunk and its non-linear version across chunks. However, using both log and softmax at the end of each chunk removes the effect of log. To this end, we consider a lag tokens after each chunk (i.e., tokens with index $i = kb + 1$ , where $b$ is the chunk size and $k \in \mathbb{Z}^+$ ). That is, let $\mathcal{M}_0$ be the last state of the memory in previous chunk, we have: + +$$ +\mathcal {M} _ {1} = \operatorname {S o f t m a x} \left(\alpha_ {1} \log \left(\mathcal {M} _ {0}\right) - \eta_ {1} \nabla \ell_ {2} \left(\mathcal {M} _ {0}; \mathbf {k} _ {1}, \mathbf {v} _ {1}\right)\right), \tag {30} +$$ + +and then we use $\mathcal{M}_1$ for the next chunk. 
Again, for the sake of clarity, assume that memory is linear, i.e., $\mathcal{M}_1 = W_1$ :

$$
\nabla \ell \left(W _ {1}; \mathbf {k} _ {t}, \mathbf {v} _ {t}\right) = \left(W _ {1} \mathbf {k} _ {t} - \mathbf {v} _ {t}\right) \mathbf {k} _ {t} ^ {\top} \tag {31}
$$

$$
\Rightarrow \sum_ {i = 1} ^ {b} \eta_ {i} \frac {\beta_ {b}}{\beta_ {i}} \nabla \ell \left(W _ {1}; \mathbf {k} _ {i}, \mathbf {v} _ {i}\right) = \mathbf {E} _ {b} \odot \mathbf {B} _ {b} \odot \left(W _ {1} \mathbf {K} - \mathbf {V}\right) \mathbf {K} ^ {\top}, \tag {32}
$$

where matrices are defined the same as for Equation 29.

# 6 Experiments

In our experimental evaluations, we aim to answer three main questions: (1) Do different attentional biases result in different architectures in practice? (2) How do different types of retention gates affect the performance of the model in long context? (3) How do MEMORA, MONETA, and YAAD perform in downstream tasks compared to baselines?

Setup. We train our models with training context window of size 4096 using either the FineWeb-Edu dataset (Penedo et al. 2024) (for LM and common-sense reasoning tasks) or the C4 dataset (Raffel et al. 2020) (for scaling patterns). We use model

![](images/23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg)
Figure 3: Scaling patterns when increasing (Left) model size, (Middle) sequence length (model size = 340M), and (Right) sequence length (model size = 760M) on C4 dataset.

![](images/bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg)

![](images/1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg)

sizes of 120M, 340M, 760M, and 1.3B parameters. We train small models (120M and 340M) on 15B tokens sampled from the dataset, the medium size model (760M) on 30B tokens, and the large model on 100B tokens. Baseline results are reported by Behrouz et al. (2024c). 
+

# 6.1 Language Modeling and Common-sense Reasoning

We follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a,c) and first focus on the perplexity in language modeling and also commonsense reasoning tasks. The results for MEMORA, YAAD, MONETA and also baselines with size of 340M, 760M, and 1.3B are reported in Table 2. All of our variants outperform all the baselines including Transformer++, modern linear recurrent models and hybrid methods. The superior performance compared to hybrid models is particularly important as all of our variants are pure recurrent (attention-free). Among the three variants of MirAS, while MONETA achieves slightly weaker performance than MEMORA and YAAD, the other two variants are close and, depending on the task and model size, the best model can vary.

# 6.2 Scaling Pattern

To evaluate the scaling pattern of models and for comparing them with baselines, in this section, we plot their performance with varying the model size and the context window.

Context Length. We first vary the training context length from 2K to 32K for two versions of our model with size 340M and 760M. The results are reported in Figure 3 (Middle and Right). All three variants of Miras scale better than state-of-the-art baselines when increasing the context length. We attribute this superior performance to: (1) expressive memory architecture. Contrary to baselines like Mamba2 and GSA that use vector- and matrix-valued memory, our variants are using 2-layer MLPs with more expressive power to learn from longer sequences. (2) The choice of retention gate and attentional bias: All of our three variants go beyond the standard attentional biases and retention gates. These choices can help the memory to better manage its fixed-size capacity.

Model Size. We also report the #FLOPs vs. perplexity of our models and baselines in Figure 3 (Left). All three variants outperform all baselines given almost the same budget of FLOPs. 
These results, once again support the importance of powerful memory design. + +# 6.3 Needle In Haystack + +To evaluate the effective context window of our models and baselines, we use needle-in-haystack task. In this task, we evaluate the model on retrieving a piece of information (i.e., the "needle") from long distractor texts (i.e., the "haystack"). We focus on the Single NIAH (S-NIAH) task from RULER benchmark (Hsieh et al. 2024) and evaluate our models and baselines on sequences with length 1K, 2K, 4K, and 8K. The results are reported in Table 3. All our variants outperforms all the baselines with a considerable margin. Interestingly, MONETA shows better performance than others when the data is synthetic noise (S-NIAH-PK). This observation validates the effectiveness of $p$ -norm objective and retention gates as they are more robust to noise. + +Table 2: Performance of MIRAS's variants and baselines on language modeling and common-sense reasoning tasks. Hybrid models are marked with *. The best results of simple and hybrid models are highlighted. In largest scale, we compare our simple models with even hybrid models and highlight the best results. + +
ModelWiki. ppl ↓LMB. ppl ↓LMB. acc ↑PIQA acc ↑Hella. acc_n ↑Wino. acc ↑ARC-e acc ↑ARC-c acc_n ↑SIQA acc ↑BoolQ acc ↑
340M params / 15B tokens
Transformer++31.5241.0830.7662.9834.7650.5345.2124.0536.8158.24
RetNet32.5049.7328.2462.6134.1550.9144.2723.6236.7959.72
GLA28.5143.0228.7364.0535.9650.0054.1924.2937.1358.39
Mamba30.8340.2129.9463.7935.8849.8249.2424.5635.4160.07
DeltaNet28.6547.3028.4363.5235.9549.6352.6825.3737.9658.79
TTT27.4434.1930.0663.9735.7150.0853.0126.1137.3259.83
Gated DeltaNet27.0130.9434.1163.0838.1251.6055.2826.7734.8959.54
MONETA (ours)26.1929.3135.7063.9939.2352.0455.9627.1537.2960.22
YAAD (ours)26.6129.1134.0964.9339.8651.1254.7528.6433.8260.29
MEMORA (ours)27.1630.4433.6865.2139.1751.2353.4027.9934.159.29
760M params / 30B tokens
Transformer++25.2127.6435.7866.9242.1951.9560.3832.4639.5160.37
RetNet26.0824.4534.5167.1941.6352.0963.1732.7838.3657.92
Mamba222.9428.3733.5467.9042.7149.7763.4831.0940.0658.15
DeltaNet24.3724.6037.0666.9341.9850.6564.8731.3939.8859.02
TTT24.1723.5134.7467.2543.9250.9964.5333.8140.1659.58
Gated DeltaNet21.1822.0935.5468.0144.9550.7366.8733.0939.2159.14
Samba*20.6322.7139.7269.1947.3552.0166.9233.2038.9861.24
Gated DeltaNet-H2*19.8820.8339.1868.9548.2252.5767.0135.4939.3961.11
MONETA (ours)21.1821.9438.0269.5549.1653.0167.4736.0940.5363.18
YAAD (ours)20.9921.5737.8569.1450.0253.9367.7836.2741.0163.34
MEMORA (ours)22.2822.3138.1967.8249.3053.2863.5736.1540.9462.96
MONETA-H (ours)18.7220.1340.5970.8450.1354.1767.6436.7940.8762.43
YAAD-H (ours)18.5919.8040.2269.5150.4853.6968.0436.5540.2861.94
MEMORA-H (ours)18.2420.5539.9169.0649.8452.8866.9036.1240.9961.75
1.3B params / 100B tokens
Transformer++18.5318.3242.6070.0250.2353.5168.8335.1040.6657.09
RetNet19.0817.2740.5270.0749.1654.1467.3433.7840.7860.39
Mamba216.5612.5645.6671.8755.6755.2472.4737.8840.2060.13
DeltaNet17.7116.8842.4670.7250.9353.3568.4735.6640.2255.29
Gated DeltaNet16.4212.1746.6572.2555.7657.4571.2138.3940.6360.24
Samba*16.1313.2944.9470.9453.4255.5668.8136.1739.9662.11
Gated DeltaNet-H2*15.9112.5548.7672.1956.8857.7771.3339.0741.9161.55
MONETA (ours)15.5211.4747.8873.1656.1459.0972.5340.3241.9161.18
YAAD (ours)15.1811.8947.2372.8156.4659.0272.1440.0540.7361.86
MEMORA (ours)15.9012.0448.6773.1055.9957.3671.5537.9240.1961.34
+ +# 6.4 Ablation Study + +In this section we perform ablation studies to validate if different design choices that we discussed through the paper are positively contributing for achieving better results. + +The Effect of $p$ on Performance. We first evaluate the effect of $p$ on the performance of MONETA. We vary the value of $p \in \{1, 1.5, 2, 2.8, 3, 3.2, 4\}$ and context window from 2K to 16K. The results are reported in Figure 4. Interestingly, there is no monotone pattern when increasing the value of $p$ and the best performance is achieved when $p = 3$ , while $p = 4$ + +Table 3: Performance of MONETA, YAAD, MEMORA, and baselines on NIAH task from RULER benchmark. The best results with highest accuracy are highlighted. + +
ModelS-NIAH-PKS-NIAH-NS-NIAH-WAverage
2K4K8K2K4K8K1K2K4K
Mamba298.661.431.098.455.814.262.242.24.252.0
DeltaNet96.898.898.647.215.412.885.246.220.057.9
Gated DeltaNet89.891.490.099.291.826.486.482.624.475.8
TTT98.498.898.060.236.610.285.878.828.066.1
MONETA99.498.898.899.499.492.892.288.270.893.5
YaAD99.298.694.499.898.693.291.889.667.492.9
MEMORA99.298.892.698.499.293.292.488.270.492.1

achieves the worst performance. Also, although different values of $p$ result in different memory modules with varied performance, the scaling pattern when increasing the context length is almost the same.

The Effect of $q$ on Performance. Similarly, we evaluate the effect of $q$ by varying it in $\{2, 3, 4, 5\}$ . Interestingly, contrary to $p$ , the value of $q$ can change the scaling pattern when increasing the context length. The main reason for this observation is that the value of $q$ determines the retention gate and a powerful retention gate can improve the memory management, resulting in better performance.

The Effect of Design. To evaluate the architectural design choices, we perform an ablation study on YAAD. The results are in Table 4. The first row reports the performance of YAAD, while (1) the second row removes the retention gate (i.e., $\beta = 1$ ), (2) the third row makes $\delta$ input-independent, (3) the fourth row removes the $\ell_2$ -loss from the Huber loss, (4) the fifth row removes the $\ell_1$ condition, and (5) the last row replaces the MLP with a linear layer. These results indicate that all design choices are contributing to the performance of the model.

![](images/a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg)
Figure 4: The effect of parameters $p$ and $q$ on the performance with different context length.

![](images/85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg)

Table 4: Ablation study on the components of YAAD.

ModelAvg. LM
YAAD53.98
- Retention Gate50.63
- Input-dependent δ52.19
l2-loss52.86
l1-loss53.04
linear memory51.57
+ +# 7 Conclusion + +In this paper, we present MIRAS, a general framework that explains the connection of online optimization and test time memorization. MIRAS framework can explain the role of several standard architectural choices in the literature (e.g., forget gate) and helps design next generation of architectures that are capable of managing the memory better. Building upon our framework, we present three novel sequence models, each of which with its own (dis)advantages. Our experimental evaluations show that all these variants are more powerful than Transformers and linear RNNs, in various downstream tasks. In this work, we present a diverse set of variants using MIRAS. In future, exploring these alternative architectures for different downstream tasks is an interesting future direction. + +# References + +[1] Ali Behrouz, Parsa Delavari, and Farnoosh Hashemi. "Unsupervised Representation Learning of Brain Activity via Bridging Voxel Activity and Functional Connectivity". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=n0jZfpLyh1. +[2] Ali Behrouz, Michele Santacatterina, and Ramin Zabih. "Mambamixer: Efficient selective state space models with dual token and channel selection". In: arXiv preprint arXiv:2403.19888 (2024). +[3] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. "Titans: Learning to memorize at test time". In: arXiv preprint arXiv:2501.00663 (2024). +[4] Alberto Bietti, Vivien Cabannes, Diane Bouchacourt, Herve Jegou, and Leon Bottou. "Birth of a transformer: A memory viewpoint". In: Advances in Neural Information Processing Systems 36 (2023), pp. 1560-1588. +[5] Yonatan Bisk, Rowan Zellers, Jianfeng Gao, Yejin Choi, et al. "Piqa: Reasoning about physical commonsense in natural language". In: Proceedings of the AAAI conference on artificial intelligence. Vol. 34. 2020, pp. 7432-7439. +[6] Leon Bottou and Vladimir Vapnik. "Local learning algorithms". In: Neural computation 4.6 (1992), pp. 
888-900. +[7] Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. "BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions". In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Ed. by Jill Burstein, Christy Doran, and Thamar Solorio. Minneapolis, Minnesota: Association for Computational Linguistics, June 2019, pp. 2924-2936. DOI: 10.18653/v1/N19-1300. URL: https://aclanthology.org/N19-1300/. +[8] Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. "Think you have solved question answering? try arc, the ai2 reasoning challenge". In: arXiv preprint arXiv:1803.05457 (2018). +[9] Imre Csiszar. "On information-type measure of difference of probability distributions and indirect observations". In: Studia Sci. Math. Hungar. 2 (1967), pp. 299-318. +[10] Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. "Recurrent Neural Networks Learn to Store and Generate Sequences using Non-Linear Representations". In: Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP. 2024, pp. 248-262. +[11] Karan Dalal, Daniel Koceja, Gashon Hussein, Jiarui Xu, Yue Zhao, Youjin Song, Shihao Han, Ka Chun Cheung, Jan Kautz, Carlos Guestrin, et al. "One-Minute Video Generation with Test-Time Training". In: arXiv preprint arXiv:2504.05298 (2025). +[12] Tri Dao and Albert Gu. "Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality". In: arXiv preprint arXiv:2405.21060 (2024). +[13] Soham De, Samuel L Smith, Anushan Fernando, Aleksandar Botev, George Cristian-Muraru, Albert Gu, Ruba Haroun, Leonard Berrada, Yutian Chen, Srivatsan Srinivasan, et al. 
"Griffin: Mixing gated linear recurrences with local attention for efficient language models". In: arXiv preprint arXiv:2402.19427 (2024). +[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. "An image is worth 16x16 words: Transformers for image recognition at scale". In: arXiv preprint arXiv:2010.11929 (2020). +[15] Yossi Gandelsman, Yu Sun, Xinlei Chen, and Alexei Efros. "Test-time training with masked autoencoders". In: Advances in Neural Information Processing Systems 35 (2022), pp. 29374-29385. +[16] Xavier Gonzalez, Andrew Warrington, Jimmy Smith, and Scott Linderman. "Towards scalable and stable parallelization of nonlinear rnns". In: Advances in Neural Information Processing Systems 37 (2024), pp. 5817-5849. +[17] Riccardo Grazzi, Julien Siems, Jörg KH Franke, Arber Zela, Frank Hutter, and Massimiliano Pontil. "Unlocking state-tracking in linear rnns through negative eigenvalues". In: arXiv preprint arXiv:2411.12537 (2024). +[18] Klaus Greff, Rupesh K Srivastava, Jan Koutnk, Bas R Steunebrink, and Jürgen Schmidhuber. "LSTM: A search space odyssey". In: IEEE transactions on neural networks and learning systems 28.10 (2016), pp. 2222-2232. +[19] Albert Gu and Tri Dao. "Mamba: Linear-Time Sequence Modeling with Selective State Spaces". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=tEYskw1VY2. +[20] Albert Gu, Karan Goel, and Christopher Re. "Efficiently Modeling Long Sequences with Structured State Spaces". In: International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=uYLFOz1v1AC. + +[21] Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. "Liquid Structural State-Space Models". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=g4OTKRKfS7R. 
+[22]Trevor Hastie, Robert Tibshirani, Jerome Friedman, et al. The elements of statistical learning. 2009. +[23] Elad Hazan et al. "Introduction to online convex optimization". In: Foundations and Trends® in Optimization 2.3-4 (2016), pp. 157-325. +[24] Donald Olding Hebb. The organization of behavior: A neuropsychological theory. Psychology press, 2005. +[25] Dan Hendrycks and Kevin Gimpel. "Gaussian error linear units (gelus)". In: arXiv preprint arXiv:1606.08415 (2016). +[26] Donald E Hilt and Donald W Seegrist. Ridge, a computer program for calculating ridge regression estimates. Vol. 236. Department of Agriculture, Forest Service, Northeastern Forest Experiment ..., 1977. +[27] Arthur E Hoerl and Robert W Kennard. "Ridge regression: applications to nonorthogonal problems". In: Technometrics 12.1 (1970), pp. 69-82. +[28] John J Hopfield. “Neural networks and physical systems with emergent collective computational abilities.” In: Proceedings of the national academy of sciences 79.8 (1982), pp. 2554-2558. +[29] Cheng-Ping Hsieh, Simeng Sun, Samuel Kriman, Shantanu Acharya, Dima Rekesh, Fei Jia, and Boris Ginsburg. "RULER: What's the Real Context Size of Your Long-Context Language Models?" In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=kIoBbc76Sy. +[30] Jerry Yao-Chieh Hu, Dennis Wu, and Han Liu. "Provably optimal memory capacity for modern hopfield models: Transformer-compatible dense associative memories as spherical codes". In: arXiv preprint arXiv:2410.23126 (2024). +[31] Peter J Huber. "Robust estimation of a location parameter". In: Breakthroughs in statistics: Methodology and distribution. Springer, 1992, pp. 492-518. +[32] Kazuki Irie, Robert Csordas, and Jürgen Schmidhuber. "Practical computational power of linear transformers and their recurrent and self-referential extensions". In: arXiv preprint arXiv:2310.16076 (2023). +[33] Kazuki Irie, Imanol Schlag, Robert Csordas, and Jurgen Schmidhuber. 
"Going beyond linear transformers with recurrent fast weight programmers". In: Advances in neural information processing systems 34 (2021), pp. 7703-7717. +[34] Vidit Jain and Erik Learned-Miller. "Online domain adaptation of a pre-trained cascade of classifiers". In: CVPR 2011. IEEE. 2011, pp. 577-584. +[35] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. "Scaling laws for neural language models". In: arXiv preprint arXiv:2001.08361 (2020). +[36] M. Karami and V. Mirrokni. Lattice: Learning to Efficiently Compress the Memory. 2025. +[37] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. "Transformers are rnns: Fast autoregressive transformers with linear attention". In: International conference on machine learning. PMLR. 2020, pp. 5156-5165. +[38] Dmitry Krotov. "Hierarchical associative memory". In: arXiv preprint arXiv:2107.06446 (2021). +[39] Dmitry Krotov and John J Hopfield. “Dense associative memory for pattern recognition”. In: Advances in neural information processing systems 29 (2016). +[40] Aonian Li, Bangwei Gong, Bo Yang, Boji Shan, Chang Liu, Cheng Zhu, Chunhao Zhang, Congchao Guo, Da Chen, Dong Li, et al. "Minimax-01: Scaling foundation models with lightning attention". In: arXiv preprint arXiv:2501.08313 (2025). +[41] Chengxuan Li, Di Huang, Zeyu Lu, Yang Xiao, Qingqi Pei, and Lei Bai. “A survey on long video generation: Challenges, methods, and prospects”. In: arXiv preprint arXiv:2403.16407 (2024). +[42] Xiaoyu Li, Yuanpeng Li, Yingyu Liang, Zhenmei Shi, and Zhao Song. "On the expressive power of modern hopfield networks". In: arXiv preprint arXiv:2412.05562 (2024). +[43] Yi Heng Lim, Qi Zhu, Joshua Selfridge, and Muhammad Firmansyah Kasim. "Parallelizing non-linear sequential models over the sequence length". In: The Twelfth International Conference on Learning Representations. 2024. 
URL: https://openreview.net/forum?id=E34A1VLN0v. +[44] Bo Liu, Rui Wang, Lemeng Wu, Yihao Feng, Peter Stone, and Qiang Liu. "Longhorn: State space models are amortized online learners". In: arXiv preprint arXiv:2407.14207 (2024). +[45] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. "Lost in the middle: How language models use long contexts". In: Transactions of the Association for Computational Linguistics 12 (2024), pp. 157-173. +[46] Elizabeth F Loftus. "The reality of repressed memories." In: American psychologist 48.5 (1993), p. 518. + +[47] Carlo Lucibello and Marc Mézard. "Exponential capacity of dense associative memories". In: Physical Review Letters 132.7 (2024), p. 077301. +[48] Julien Mairal. "Incremental majorization-minimization optimization with application to large-scale machine learning". In: SIAM Journal on Optimization 25.2 (2015), pp. 829-855. +[49] Harsh Mehta, Ankit Gupta, Ashok Cutkosky, and Behnam Neyshabur. "Long Range Language Modeling via Gated State Spaces". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=5MkYIYCbva. +[50] Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. “Pointer Sentinel Mixture Models”. In: International Conference on Learning Representations. 2017. URL: https://openreview.net/forum?id=Byj72udxe. +[51] William Merrill, Jackson Petty, and Ashish Sabharwal. "The Illusion of State in State-Space Models". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=QZgo9JZpLq. +[52] Ravi Teja Mullapudi, Steven Chen, Keyi Zhang, Deva Ramanan, and Kayvon Fatahalian. "Online model distillation for efficient video inference". In: Proceedings of the IEEE/CVF International conference on computer vision. 2019, pp. 3573-3582. +[53] Tsendsuren Munkhdalai, Alessandro Sordoni, Tong Wang, and Adam Trischler. “Metalearned neural memory”. 
In: Advances in Neural Information Processing Systems 32 (2019). +[54] Tsendsuren Munkhdalai and Hong Yu. "Neural semantic encoders". In: Proceedings of the conference. Association for Computational Linguistics. Meeting. Vol. 1. NIH Public Access. 2017, p. 397. +[55] Daniel Neil, Jun Haeng Lee, Tobi Delbruck, and Shih-Chii Liu. "Delta networks for optimized recurrent network computation". In: International conference on machine learning. PMLR. 2017, pp. 2584-2593. +[56] Hideyuki Okano, Tomoo Hirano, and Evan Balaban. "Learning and memory". In: Proceedings of the National Academy of Sciences 97.23 (2000), pp. 12403-12404. +[57] Antonio Orvieto, Samuel L Smith, Albert Gu, Anushan Fernando, Caglar Gulcehre, Razvan Pascanu, and Soham De. "Resurrecting recurrent neural networks for long sequences". In: International Conference on Machine Learning. PMLR. 2023, pp. 26670-26698. +[58] Denis Paperno, German Kruszewski, Angeliki Lazaridou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernandez. "The LAMBADA dataset: Word prediction requiring a broad discourse context". In: Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Ed. by Katrin Erk and Noah A. Smith. Berlin, Germany: Association for Computational Linguistics, Aug. 2016, pp. 1525-1534. DOI: 10.18653/v1/P16-1144. URL: https://aclanthology.org/P16-1144/. +[59] Guilherme Penedo, Hynek Kydlcek, Loubna Ben allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro Von Werra, and Thomas Wolf. "The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale". In: The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2024. URL: https://openreview.net/forum?id=n6Sckn2QaG. 
+[60] Bo Peng, Eric Alcaide, Quentin Gregory Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Nguyen Chung, Leon Derczynski, Xingjian Du, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, Jiaju Lin, Krishna Sri Ipsit Mantri, Ferdinand Mom, Atsushi Saito, Guangyu Song, Xiangru Tang, Johan S. Wind, Stanisław Wozniak, Zhenyuan Zhang, Qinghua Zhou, Jian Zhu, and Rui-Jie Zhu. "RWKV: Reinventing RNNs for the Transformer Era". In: The 2023 Conference on Empirical Methods in Natural Language Processing. 2023. URL: https://openreview.net/forum?id=7SaXczaBpG. +[61] Bo Peng, Daniel Goldstein, Quentin Anthony, Alon Albalak, Eric Alcaide, Stella Biderman, Eugene Cheah, Xingjian Du, Teddy Ferdinan, Haowen Hou, et al. "Eagle and finch: Rwkv with matrix-valued states and dynamic recurrence". In: arXiv preprint arXiv:2404.05892 (2024). +[62] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Saiteja Utpala, et al. "RWKV-7" Goose" with Expressive Dynamic State Evolution". In: arXiv preprint arXiv:2503.14456 (2025). +[63] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Siateja Utpala, et al. "Rwkv-7" goose" with expressive dynamic state evolution". In: arXiv preprint arXiv:2503.14456 (2025). +[64] Yury Polyanskiy and Yihong Wu. Information theory: From coding to learning. Cambridge university press, 2025. +[65] DL Prados and SC Kak. "Neural network capacity using delta rule". In: *Electronics Letters* 25.3 (1989), pp. 197-199. + +[66] Zhen Qin, Songlin Yang, Weixuan Sun, Xuyang Shen, Dong Li, Weigao Sun, and Yiran Zhong. "HGRN2: Gated Linear RNNs with State Expansion". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=y6SqBJfCSk. 
+[67] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. "Exploring the limits of transfer learning with a unified text-to-text transformer". In: Journal of machine learning research 21.140 (2020), pp. 1-67. +[68] Hubert Ramsauer, Bernhard Schäfl, Johannes Lehner, Philipp Seidl, Michael Widrich, Lukas Gruber, Markus Holzleitner, Thomas Adler, David Kreil, Michael K Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. "Hopfield Networks is All You Need". In: International Conference on Learning Representations. 2021. URL: https://openreview.net/forum?id=tL89RnzIiCd. +[69] Meisam Razaviyayn, Mingyi Hong, and Zhi-Quan Luo. “A unified convergence analysis of block successive minimization methods for nonsmooth optimization”. In: SIAM Journal on Optimization 23.2 (2013), pp. 1126–1153. +[70] Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. "Samba: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling". In: arXiv preprint arXiv:2406.07522 (2024). +[71] Lee T Robertson. "Memory and the brain". In: Journal of dental education 66.1 (2002), pp. 30-42. +[72] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. "Winogrande: An adversarial winograd schema challenge at scale". In: Communications of the ACM 64.9 (2021), pp. 99-106. +[73] Imanol Schlag, Kazuki Irie, and Jürgen Schmidhuber. "Linear transformers are secretly fast weight programmers". In: International Conference on Machine Learning. PMLR. 2021, pp. 9355-9366. +[74] JH Schmidhuber. "Learning to control fast-weight memories: An alternative to recurrent nets. Accepted for publication in". In: Neural Computation (1992). +[75] Jürgen Schmidhuber. “Reducing the ratio between learning complexity and number of time varying variables in fully recurrent nets”. 
In: ICANN'93: Proceedings of the International Conference on Artificial Neural Networks Amsterdam, The Netherlands 13–16 September 1993 3. Springer. 1993, pp. 460–463. +[76] Jürgen Schmidhuber and Sepp Hochreiter. "Long Short-term Memory". In: Neural Computation MIT-Press (1997). +[77] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. "Implicit Language Models are RNNs: Balancing Parallelization and Expressivity". In: arXiv preprint arXiv:2502.07827 (2025). +[78] Shai Shalev-Shwartz et al. "Online learning and online convex optimization". In: Foundations and Trends® in Machine Learning 4.2 (2012), pp. 107-194. +[79] Julien Siems, Timur Carstensen, Arber Zela, Frank Hutter, Massimiliano Pontil, and Riccardo Grazzi. "DeltaProduct: Increasing the Expressivity of DeltaNet Through Products of Householders". In: arXiv preprint arXiv:2502.10297 (2025). +[80] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. "Simplified State Space Layers for Sequence Modeling". In: The Eleventh International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=Ai8Hw3AXqks. +[81] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. "Simplified State Space Layers for Sequence Modeling". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=Ai8Hw3AXqks. +[82] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. "Rofomer: Enhanced transformer with rotary position embedding". In: Neurocomputing 568 (2024), p. 127063. +[83] Yu Sun, Xinhao Li, Karan Dalal, Jiarui Xu, Arjun Vikram, Genghan Zhang, Yann Dubois, Xinlei Chen, Xiaolong Wang, Sanmi Koyejo, et al. "Learning to (learn at test time): Rnns with expressive hidden states". In: arXiv preprint arXiv:2407.04620 (2024). +[84] Yutao Sun, Li Dong, Shaohan Huang, Shuming Ma, Yuqing Xia, Jilong Xue, Jianyong Wang, and Furu Wei. 
"Retentive network: A successor to transformer for large language models". In: arXiv preprint arXiv:2307.08621 (2023). +[85] W Scott Terry. Learning and memory: Basic principles, processes, and procedures. Routledge, 2017. +[86] Robert Tibshirani. "Regression shrinkage and selection via the lasso". In: Journal of the Royal Statistical Society Series B: Statistical Methodology 58.1 (1996), pp. 267-288. +[87] Matteo Tiezzi, Michele Casoni, Alessandro Betti, Tommaso Guidi, Marco Gori, and Stefano Melacci. "On the resurgence of recurrent models for long sequences: Survey and research opportunities in the transformer era". In: arXiv preprint arXiv:2402.08132 (2024). +[88] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. "Llama: Open and efficient foundation language models". In: arXiv preprint arXiv:2302.13971 (2023). + +[89] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. "Attention is All you Need". In: Advances in Neural Information Processing Systems. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf. +[90] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. "Attention is All you Need". In: Advances in Neural Information Processing Systems. Ed. by I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf. +[91] Johannes Von Oswald, Maximilian Schlegel, Alexander Meulemans, Seijin Kobayashi, Eyvind Niklasson, Nicolas Zucchet, Nino Scherrer, Nolan Miller, Mark Sandler, Max Vlademyrov, et al. 
"Uncovering mesa-optimization algorithms in transformers". In: arXiv preprint arXiv:2309.05858 (2023). +[92] Ke Alexander Wang, Jiaxin Shi, and Emily B Fox. "Test-time regression: a unifying framework for designing sequence models with associative memory". In: arXiv preprint arXiv:2501.12352 (2025). +[93] Yingheng Wang, Zichen Wang, Gil Sadeh, Luca Zancato, Alessandro Achille, George Karypis, and Huzefa Rangwala. "Long-context Protein Language Model". In: bioRxiv (2024), pp. 2024-10. +[94] Songlin Yang, Jan Kautz, and Ali Hatamizadeh. “Gated Delta Networks: Improving Mamba2 with Delta Rule”. In: arXiv preprint arXiv:2412.06464 (2024). +[95] Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. “Gated Linear Attention Transformers with Hardware-Efficient Training”. In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=ia5XvxFUJT. +[96] Songlin Yang, Bailin Wang, Yu Zhang, Yikang Shen, and Yoon Kim. "Parallelizing linear transformers with the delta rule over sequence length". In: Advances in Neural Information Processing Systems 37 (2024), pp. 115491-115522. +[97] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. "HellaSwag: Can a Machine Really Finish Your Sentence?" In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Ed. by Anna Korhonen, David Traum, and Lluis Marquez. Florence, Italy: Association for Computational Linguistics, July 2019, pp. 4791-4800. DOI: 10.18653/v1/P19-1472. URL: https://aclanthology.org/P19-1472/. +[98] Biao Zhang and Rico Sennrich. "Root mean square layer normalization". In: Advances in Neural Information Processing Systems 32 (2019). +[99] Hao Zhang, Alexander C Berg, Michael Maire, and Jitendra Malik. "SVM-KNN: Discriminative nearest neighbor classification for visual category recognition". In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06). Vol. 2. IEEE. 2006, pp. 
2126-2136. + +# A Additional Related Work + +Modern Linear RNNs. Recent efforts aim to overcome Transformers quadratic cost and limitations in long-context modeling by designing efficient recurrent alternatives (Tiezzi et al. 2024), mainly due to fast inference and training of such models. The first generation of models—such as RetNet (Sun et al. 2023), LRU (Orvieto et al. 2023), RWKV (Peng et al. 2023), S5 (Smith et al. 2023), and S4 (Gu et al. 2022)—uses data-independent transition matrix mechanism with Hebbian-like update rule. The second generation of such models started to incorporate input-dependent parameters into such linear architectures (e.g., Griffin (De et al. 2024), SSMs (Behrouz et al. 2024b; Dao et al. 2024; Hasani et al. 2023), RWKV6 (Peng et al. 2024)), and/or use more expressive memory updating rule based on delta rule (Liu et al. 2024a; Peng et al. 2025b; Schlag et al. 2021; Yang et al. 2024a,c). The next generation of models, extend the memory architecture to deep models, while using delta-rule-like update rule (Sun et al. 2024), or momentum-based update rule (Behrouz et al. 2024c). Recently, to further enhance the performance of delta-rule-based sequence models, Siemens et al. (2025) suggest using multiple gradient descent update per token, resulting in more expressive sequence models in state tracking tasks. + +In addition to the above fast linear recurrent sequence models, several studies have focused on (interpretable) non-linear RNNs (Csordás et al. 2024; Gonzalez et al. 2024; Karami et al. 2025; Lim et al. 2024; Merrill et al. 2024; Schone et al. 2025; Von Oswald et al. 2023), and how their training can be faster (Gonzalez et al. 2024; Lim et al. 2024; Schone et al. 2025). However, due to the recurrent nature of such models, parallelizing them in larger scales is still challenging. + +Fast Weight Programs. 
The idea of interpretation of linear layers as the key-value associative memory system backs to Hopfield networks (Hopfield 1982) and then fast weight programs, in which dynamic fast programs are incorporated into recurrent neural networks as writeable memory (Schlag et al. 2021; Schmidhuber 1992; Schmidhuber 1993). The two learning rules of Hebbian (Hebb 2005) and delta rule (Prados et al. 1989) are the most popular learning rules for them, which have been extensively explored in the literature (Irie et al. 2021; Munkhdalai et al. 2019, 2017; Schlag et al. 2021; Schmidhuber 1992; Yang et al. 2024a,c). + +Test Time Training. The key ideas of learning at test time backs to early studies on local learning Bottou et al. 1992, in which each test data is trained on its neighbors before making a prediction (Gandelsman et al. 2022; Zhang et al. 2006). Later applying this idea on modern architectures, it has shown promising performance in diverse downstream tasks such as vision tasks (Jain et al. 2011; Mullapudi et al. 2019), video generation (Dalal et al. 2025), etc., mostly due to their ability to mitigate out-of-distribution samples. + +Hopfield Networks. We build MIRAS based on the concept of associative memory in its broad form, where we learn an underlying mapping between keys and values. One of the earliest studies that discuss building neural architectures based on associative memory is Hopfield Networks (Hopfield 1982), in which associative memory is defined as the minimizing the energy function required to store keys and values. While traditional Hopfield networks has limited applicability in recent years (mainly due to limited capacity of vector-valued memory and energy function), several recent studies aim to improve their capacity by various techniques (Krotov 2021; Krotov et al. 2016; Li et al. 2024b), including extending the energy function of such models based on exponential kernels (Krotov et al. 2016; Lucibello et al. 
2024), and discuss their connection to Transformers (Hu et al. 2024; Ramsauer et al. 2021). + +Unifying Frameworks. In recent years, there have been growing efforts to understand the underlying mechanism of sequence models and unify (a subset of) them through a single perspective. Dao et al. (2024) present SSD framework to connect linear Transformers and (a subset of) linear recurrent models through the lens of associative operators and structured matrices. The SSD framework, however, is limited to models with vector or matrix-valued memory that are updated using a Hebbian-like update rules. Later, Liu et al. (2024a) present an online learning perspective on (a subset of) linear recurrent models. While this framework can also explain more expressive recurrent models based on delta rule, it is limited to online learners (i.e., models that optimize their internal associative memory using stochastic optimizers, such as stochastic gradient descent) with matrix-valued memory. Several modern sequence models, such as Transformers (Vaswani et al. 2017b) or Titans (Behrouz et al. 2024c) cannot be expressed in this framework. Sun et al. (2024) further provide a unifying perspective on how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models, mainly due to limiting the objective to be regression loss. Recently, in a concurrent work to ours, Wang et al. (2025) also force models to have the same attentional bias objective and show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss. + +However, this simplification, fully changes the understanding of underlying update rules in these models. For example, contrary to Wang et al. 
(2025), MIRAS can distinguish models with Hebbian-like update (with dot product similarity) and delta rule update (with regression loss). Furthermore, all presented sequence models in this work (e.g., MONETA, MEMORA, YAAD) as well as models like HGRN2 (Qin et al. 2024) are placed outside of this class of models, due to their different attentional bias. + +# B Proof of Proposition 3.2 + +Here we present the proof of Proposition 3.2. For the sake of completeness, let us first re-state this Proposition. + +Proposition 3.2. Let $\eta_t = \eta$ and define $h_t(W) \coloneqq \sum_{i=1}^{t-1} \widehat{\ell}_i(W; \mathbf{k}_i, \mathbf{v}_i) + \frac{1}{\eta} R(W)$ . Assume $\mathcal{W} = \mathbb{R}^d$ and the function $h_t(W)$ is strictly convex in $W$ and let $\mathcal{D}_h(\cdot, \cdot)$ be the Bregman divergence defined by function $h(\cdot)$ , i.e., $\mathcal{D}_h(W, W') = h(W) - h(W') - \langle \nabla h(W'), W - W' \rangle$ . Set $\mathrm{Ret}_t(W, W') = \mathcal{D}_h(W, W')$ and $\widetilde{\ell}_t(W; x_t) = \widehat{\ell}_t(W; x_t)$ in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint). + +Proof. Let $\{\widehat{W}_1, \widehat{W}_2, \ldots\}$ be the sequence of parameters obtained by (FTRL Viewpoint) and $\{\widetilde{W}_1, \widetilde{W}_2, \ldots\}$ be the sequence of parameters obtained by (Learning-Retaining Viewpoint). To show both update rules are equivalent, it suffices to show that the above two sequences are the same if they are initialized at the same point. We prove this statement by induction. First of all, since both sequences are initialized at the same point, the induction base is satisfied (i.e. $\widetilde{W}_1 = \widehat{W}_1$ ). Now, assume by induction hypothesis that + +$$ +\widetilde {W} _ {t - 1} = \widehat {W} _ {t - 1}. \tag {33} +$$ + +To complete the induction, we need to show $\widetilde{W}_t = \widehat{W}_t$ . 
To this end, notice that, by (Learning-Retaining Viewpoint), we have + +$$ +\widetilde {W} _ {t} = \arg \min _ {W} \quad \widetilde {\ell} _ {t} (W, \mathbf {k} _ {t}, \mathbf {v} _ {t}) + \operatorname {R e t} _ {t} (W, \widetilde {W} _ {t - 1}) +$$ + +Using the choice of the Attentional Bias and the Retention function in the Proposition, we obtain + +$$ +\begin{array}{l} \widetilde {W} _ {t} = \arg \min _ {W} \quad \widehat {\ell_ {t}} (W, \mathbf {k} _ {t}, \mathbf {v} _ {t}) + \sum_ {i = 1} ^ {t - 1} \widehat {\ell_ {i}} (W, \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} R (W) - \sum_ {i = 1} ^ {t - 1} \widehat {\ell_ {i}} (\widetilde {W} _ {t - 1}, \mathbf {k} _ {i}, \mathbf {v} _ {i}) \tag {34} \\ - \frac {1}{\eta} R (\widetilde {W} _ {t - 1}) - \left\langle \sum_ {i = 1} ^ {t - 1} \nabla \widehat {\ell_ {i}} (\widetilde {W} _ {t - 1}, \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} \nabla R (\widetilde {W} _ {t - 1}), W - \widetilde {W} _ {t - 1} \right\rangle . \\ \end{array} +$$ + +Ignoring the constant terms and using the induction hypothesis (33), we get + +$$ +\begin{array}{l} \widetilde {W} _ {t} = \arg \min _ {W} \quad \widehat {\ell_ {t}} (W, \mathbf {k} _ {t}, \mathbf {v} _ {t}) + \sum_ {i = 1} ^ {t - 1} \widehat {\ell_ {i}} (W, \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} R (W) \tag {35} \\ - \left\langle \sum_ {i = 1} ^ {t - 1} \nabla \widehat {\ell_ {i}} (\widehat {W} _ {t - 1}, \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} \nabla R (\widehat {W} _ {t - 1}), W - \widehat {W} _ {t - 1} \right\rangle . \\ \end{array} +$$ + +On the other hand, recall that $\{\widehat{W}_1,\widehat{W}_2,\ldots \}$ is obtained by (FTRL Viewpoint). Therefore, we have + +$$ +\widehat {W} _ {t - 1} = \arg \min _ {W} \sum_ {i = 1} ^ {t - 1} \widehat {\ell_ {i}} (W; \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} \mathcal {R} _ {t} (W). 
+$$ + +Thus, we have + +$$ +\sum_ {i = 1} ^ {t - 1} \nabla \widehat {\ell_ {i}} \left(W _ {t - 1}, \mathbf {k} _ {i}, \mathbf {v} _ {i}\right) + \frac {1}{\eta} \nabla R \left(W _ {t - 1}\right) = 0. \tag {36} +$$ + +Combining (36) and (35), we obtain + +$$ +\widetilde {W} _ {t} = \arg \min _ {W} \quad \sum_ {i = 1} ^ {t} \widehat {\ell_ {i}} (W, \mathbf {k} _ {i}, \mathbf {v} _ {i}) + \frac {1}{\eta} R (W). +$$ + +This implies $\widetilde{W}_t = \widehat{W}_t$ , which completes the proof. + +![](images/3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg) + +# C Experimental Setup + +We perform experimental evaluation on the language modeling (Merit et al. 2017; Paperno et al. 2016), common-sense reasoning (Bisk et al. 2020; Clark et al. 2019; Clark et al. 2018; Sakaguchi et al. 2021; Zellers et al. 2019), and long context needle-in-haystack tasks (Hsieh et al. 2024). We compare our models with the state-of-the-art linear recurrent models, Transformers, and hybrid models (recurrent + attention). More specifically we compare with Transformer++ (Touvron et al. 2023), RetNet (Sun et al. 2023), Gated Linear Attention (GLA) (Yang et al. 2024b), Mamba (Gu et al. 2024), Mamba2 (Dao et al. 2024), DeltaNet (Yang et al. 2024c), TTT (Sun et al. 2024), and Gated DeltaNet (Yang et al. 2024a). + +Table 5: Architectural Details. + +
ModelBlockDimHeadPeak LRToken
170M12768163e-315B
340M241024161.5e-315B
780M241536161.25e-330B
\ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13173/images/026c36efefaa24294a6847f1b1619a63f0d80c4dc7095d54b572f5ab7467df99.jpg b/data/2025/2504_13xxx/2504.13173/images/026c36efefaa24294a6847f1b1619a63f0d80c4dc7095d54b572f5ab7467df99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e22f4a9ed7a0317a6d31364bf854a90d8795aaf --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/026c36efefaa24294a6847f1b1619a63f0d80c4dc7095d54b572f5ab7467df99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7271e3800a88fffcfd9dbca99a29304b129dedc6da789cfe3381fa8cb354e584 +size 7870 diff --git a/data/2025/2504_13xxx/2504.13173/images/0a828922658b45ce64a9d60d455642004a9b66661dae2bda4205d140b5a9eae8.jpg b/data/2025/2504_13xxx/2504.13173/images/0a828922658b45ce64a9d60d455642004a9b66661dae2bda4205d140b5a9eae8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..577d15c78dd198dabcf8a6f0c153afc1e73595d5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/0a828922658b45ce64a9d60d455642004a9b66661dae2bda4205d140b5a9eae8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3fe2dae81b4648834a80148a2c8bc17c849790a522d1f44bed6e45533c58680 +size 10648 diff --git a/data/2025/2504_13xxx/2504.13173/images/0a9395ce519dd9a000f0c09fac3e6abdff1a487612ca4195b36605a37a2e84ac.jpg b/data/2025/2504_13xxx/2504.13173/images/0a9395ce519dd9a000f0c09fac3e6abdff1a487612ca4195b36605a37a2e84ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32b4c802232e3c8e6208c5e7d68c34d0f5051758 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/0a9395ce519dd9a000f0c09fac3e6abdff1a487612ca4195b36605a37a2e84ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1efd2a1c1fc1a3d26251741ea3ef3b231e85f66d32f5eeecfae845601cc3b0c2 +size 6956 diff --git 
a/data/2025/2504_13xxx/2504.13173/images/1b80b78fc17746439f6cf250cf3c3c38705c501929e1e69c47d3373b43897121.jpg b/data/2025/2504_13xxx/2504.13173/images/1b80b78fc17746439f6cf250cf3c3c38705c501929e1e69c47d3373b43897121.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7f8fa140d2fd309690862a84e7b7f8d75882b2e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/1b80b78fc17746439f6cf250cf3c3c38705c501929e1e69c47d3373b43897121.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a665cd293e37f9c1278ddc06713f16be8d55e153d0a8b27c9cc20ff871071ad +size 10242 diff --git a/data/2025/2504_13xxx/2504.13173/images/1d6a7b87a84a675d8fbf5949407933394dd73ec2c00a9ea617d43aaca6ccaa12.jpg b/data/2025/2504_13xxx/2504.13173/images/1d6a7b87a84a675d8fbf5949407933394dd73ec2c00a9ea617d43aaca6ccaa12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..001910ac8c98adb8b18acba8b35ddf375c84e54b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/1d6a7b87a84a675d8fbf5949407933394dd73ec2c00a9ea617d43aaca6ccaa12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e5392b6844cf0bdbfeea3b9c82237bdbdb5abe5f100cabcef44ae160bd26972 +size 4454 diff --git a/data/2025/2504_13xxx/2504.13173/images/1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg b/data/2025/2504_13xxx/2504.13173/images/1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..940848391b1949bc288c6e83029ef03fbab6bdb4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b125db02ed72ab308c8b6fdec7cb10bbabd9923b2e2cbf0447a19d2d66083e6 +size 19871 diff --git a/data/2025/2504_13xxx/2504.13173/images/20c5125fed10048404b024a11c718e3f1e0d414743c2fa9dde5ee7dac4e734e0.jpg 
b/data/2025/2504_13xxx/2504.13173/images/20c5125fed10048404b024a11c718e3f1e0d414743c2fa9dde5ee7dac4e734e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3899c4eafdfd1199f9e525d9a0de3ee9c872018 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/20c5125fed10048404b024a11c718e3f1e0d414743c2fa9dde5ee7dac4e734e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2cc39ddd039b20bbf16905d8bfa4363a2c21efb1f6185535137a1ca5e1eab09 +size 5584 diff --git a/data/2025/2504_13xxx/2504.13173/images/217599465fb61359aa527b59a4521cfa484c2814e47cefdea51dc2e3e13a3ff6.jpg b/data/2025/2504_13xxx/2504.13173/images/217599465fb61359aa527b59a4521cfa484c2814e47cefdea51dc2e3e13a3ff6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..778a09c20909c05e917f3f7c2342cb9844d427df --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/217599465fb61359aa527b59a4521cfa484c2814e47cefdea51dc2e3e13a3ff6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a35cb6a9d95f8306a1e81679feae6af06145884d237cac139f0657159f1782e8 +size 10178 diff --git a/data/2025/2504_13xxx/2504.13173/images/23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg b/data/2025/2504_13xxx/2504.13173/images/23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d6d01d6470bdcc0df687270f92c6dcd391d78de --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:542a11cf7f48d171fe84c1b7d020b9b78c63a7c0b72369a5a6b0e5ff56866eae +size 19788 diff --git a/data/2025/2504_13xxx/2504.13173/images/2a736aa1cc3e1f2073676420b2b757c56323282bcfd7e821464558383a1be3a3.jpg b/data/2025/2504_13xxx/2504.13173/images/2a736aa1cc3e1f2073676420b2b757c56323282bcfd7e821464558383a1be3a3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..42de319736683900c94f60e9e014aba9c8db0dcd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/2a736aa1cc3e1f2073676420b2b757c56323282bcfd7e821464558383a1be3a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:120ba55b00f57322455bb81e490078eccf7c04053ab6e779c83fb2cadcbbaaa1 +size 8464 diff --git a/data/2025/2504_13xxx/2504.13173/images/2c094c79d0c5c5d07d2462c9a20b77422b3b010ddf2082cd793ae09d0bc64a5d.jpg b/data/2025/2504_13xxx/2504.13173/images/2c094c79d0c5c5d07d2462c9a20b77422b3b010ddf2082cd793ae09d0bc64a5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fefe9180a17ee253b1c4cb842c52646bf5a4d74 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/2c094c79d0c5c5d07d2462c9a20b77422b3b010ddf2082cd793ae09d0bc64a5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6aa6af226e45f67e0201e520250867999efa91240aef470d5d622205ce9bb9a +size 7095 diff --git a/data/2025/2504_13xxx/2504.13173/images/2e046a24e952b90612b884a9e695d2aca8c1cf15a82464130d6131f20bfbc4bc.jpg b/data/2025/2504_13xxx/2504.13173/images/2e046a24e952b90612b884a9e695d2aca8c1cf15a82464130d6131f20bfbc4bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4615f79528759d0887b9bc248c2915f952465f6a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/2e046a24e952b90612b884a9e695d2aca8c1cf15a82464130d6131f20bfbc4bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:748194288e49a4006c4f0b7f350dda1a91cd3ee2d21e669be6a181fa3308b29d +size 18311 diff --git a/data/2025/2504_13xxx/2504.13173/images/2f01d768348f0ff12ac114dbd564983014fcd1732e75507df4d89a282f291b22.jpg b/data/2025/2504_13xxx/2504.13173/images/2f01d768348f0ff12ac114dbd564983014fcd1732e75507df4d89a282f291b22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a9c822c2879c27243451a7db741aab48a3e6bc9 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13173/images/2f01d768348f0ff12ac114dbd564983014fcd1732e75507df4d89a282f291b22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75be5314fe2f96d660aa501185ddf5612ebff75629b7faec794bd7d6102fa5f6 +size 4652 diff --git a/data/2025/2504_13xxx/2504.13173/images/3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg b/data/2025/2504_13xxx/2504.13173/images/3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ee29e28b7faedea8d5ee494df865a3b85e60c23 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90355eabd7d73abbdc799710a028da532d8bfe9b195aba92d4547e13f0f34b25 +size 790 diff --git a/data/2025/2504_13xxx/2504.13173/images/348a7fefd6264ac140f0fb64042a7134f5cda5eaa7d2dc9808f2ac8dfc62ec0e.jpg b/data/2025/2504_13xxx/2504.13173/images/348a7fefd6264ac140f0fb64042a7134f5cda5eaa7d2dc9808f2ac8dfc62ec0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08ecae25ae34294a768057a98e8b8e86226ec87f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/348a7fefd6264ac140f0fb64042a7134f5cda5eaa7d2dc9808f2ac8dfc62ec0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b32973ca6e6fb6ced7074d6d570b49785a2565692481172c4181639d9c67e17 +size 13947 diff --git a/data/2025/2504_13xxx/2504.13173/images/3ca7a2124d41e7f26c2de601d6a3cc4a9a0585a05b2b7776efb58ef94c514354.jpg b/data/2025/2504_13xxx/2504.13173/images/3ca7a2124d41e7f26c2de601d6a3cc4a9a0585a05b2b7776efb58ef94c514354.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f0c9795eeaac7b204b2f99ea64320e6474a052d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/3ca7a2124d41e7f26c2de601d6a3cc4a9a0585a05b2b7776efb58ef94c514354.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:467b71c23c5c83b7b9df40ff3d92d3f5312ac5fc51d4dde1d92b2c0ff44595b7 +size 7458 diff --git a/data/2025/2504_13xxx/2504.13173/images/41e18f1774e205798aee024b6b97b0a05d32f79834cff545582d51fa15d3a06b.jpg b/data/2025/2504_13xxx/2504.13173/images/41e18f1774e205798aee024b6b97b0a05d32f79834cff545582d51fa15d3a06b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a75ebc8b741246a9924f6ac9094d0eea937d0c5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/41e18f1774e205798aee024b6b97b0a05d32f79834cff545582d51fa15d3a06b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6da89dbb1d8cc16923c9ca215967bb6d636f1833e910b2523afeb46a46fdd5f2 +size 10049 diff --git a/data/2025/2504_13xxx/2504.13173/images/45c18238da20b86ece6c49f73c0da7ab6603bbe33993974acf302e629ba56a20.jpg b/data/2025/2504_13xxx/2504.13173/images/45c18238da20b86ece6c49f73c0da7ab6603bbe33993974acf302e629ba56a20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f9051bf404bc447e2c4360bfb65aafacbdd452c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/45c18238da20b86ece6c49f73c0da7ab6603bbe33993974acf302e629ba56a20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:555f3e37f7c65780648c9648d17161e2f4111ae0e791d67edfa3068b838005c7 +size 22765 diff --git a/data/2025/2504_13xxx/2504.13173/images/46351f9f08e7407d5c80ee9f0ffc73c06e03ccc5f3563dfd30ce8a79ae29a1b7.jpg b/data/2025/2504_13xxx/2504.13173/images/46351f9f08e7407d5c80ee9f0ffc73c06e03ccc5f3563dfd30ce8a79ae29a1b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2531561c54f70b58861ecbea4292395bb2a265bf --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/46351f9f08e7407d5c80ee9f0ffc73c06e03ccc5f3563dfd30ce8a79ae29a1b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac5163e5befcd68ade7e7f35486bd31d30835811e163b4f8df1a673abd43b17 +size 21401 diff --git 
a/data/2025/2504_13xxx/2504.13173/images/4670138483116ccc15979168b92a20ec83b7454bc05327f30119009a0ca9e0c6.jpg b/data/2025/2504_13xxx/2504.13173/images/4670138483116ccc15979168b92a20ec83b7454bc05327f30119009a0ca9e0c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ea88fc292a76c93ba4ca709203fe50f0d85e949 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/4670138483116ccc15979168b92a20ec83b7454bc05327f30119009a0ca9e0c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:596ebc7e429e7d5b16491d7aff98d781b674a026aef8148cd4d612a045433435 +size 6973 diff --git a/data/2025/2504_13xxx/2504.13173/images/47c61bb3297e19291eef9fd18585bc1b7909eced6c0e4c2e60bf6bb6f112aa51.jpg b/data/2025/2504_13xxx/2504.13173/images/47c61bb3297e19291eef9fd18585bc1b7909eced6c0e4c2e60bf6bb6f112aa51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0e9d91e4219a4cc392ad1a857839dfdf89b955f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/47c61bb3297e19291eef9fd18585bc1b7909eced6c0e4c2e60bf6bb6f112aa51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e24fc83fa3694b647251c0a823932ff93eb02dcae6c035c2eb2ecaedc400fd5 +size 3863 diff --git a/data/2025/2504_13xxx/2504.13173/images/49d2d680aea288aa49db17399e5b37ca1e6308d3becf19d8f89c0a193794abf2.jpg b/data/2025/2504_13xxx/2504.13173/images/49d2d680aea288aa49db17399e5b37ca1e6308d3becf19d8f89c0a193794abf2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2005e2fdee5fb5bf6312385e59389989643e8e2a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/49d2d680aea288aa49db17399e5b37ca1e6308d3becf19d8f89c0a193794abf2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d51c9efd491aaac275ffcb01514a67685176e0aaf051fb8bb500ad1c3ab7a81 +size 9253 diff --git a/data/2025/2504_13xxx/2504.13173/images/524c1b4283649a6bdf72c52e3e89b019b5582b3cd8a6e613ade1c2d8d84d62be.jpg 
b/data/2025/2504_13xxx/2504.13173/images/524c1b4283649a6bdf72c52e3e89b019b5582b3cd8a6e613ade1c2d8d84d62be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56a449b2398eb1cb82d5118244a121fbf372a2be --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/524c1b4283649a6bdf72c52e3e89b019b5582b3cd8a6e613ade1c2d8d84d62be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34c085c0132b3e18a355ce20e499ce0b40fec4886ef6f4fab540ef3158fd9ea8 +size 4406 diff --git a/data/2025/2504_13xxx/2504.13173/images/5b943db11a74fd598a122c80f5abbba22c83b0d912d8b35b2bceeaf78cbd0309.jpg b/data/2025/2504_13xxx/2504.13173/images/5b943db11a74fd598a122c80f5abbba22c83b0d912d8b35b2bceeaf78cbd0309.jpg new file mode 100644 index 0000000000000000000000000000000000000000..181dfd3d6c5256b3d3ec18c30152537a4bd7c342 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/5b943db11a74fd598a122c80f5abbba22c83b0d912d8b35b2bceeaf78cbd0309.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:071d4bb597fb70ce276fa580d50dec7b2935a097dcd65453010c8e1f3f27f681 +size 3019 diff --git a/data/2025/2504_13xxx/2504.13173/images/5fcaaa66b990bc9ef3c4f4321be98b3555b40eb9a671ef6a62104bad2a17b709.jpg b/data/2025/2504_13xxx/2504.13173/images/5fcaaa66b990bc9ef3c4f4321be98b3555b40eb9a671ef6a62104bad2a17b709.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b25514124642e807c1d1742a83cced125da89d6e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/5fcaaa66b990bc9ef3c4f4321be98b3555b40eb9a671ef6a62104bad2a17b709.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9207a3d63541690433df9145910d4c8fb18834634954a8d9354a82217db768f +size 9083 diff --git a/data/2025/2504_13xxx/2504.13173/images/638fa6e5739015c4bb34eb4d0121982ad373fbdc5354756583417349ef715446.jpg b/data/2025/2504_13xxx/2504.13173/images/638fa6e5739015c4bb34eb4d0121982ad373fbdc5354756583417349ef715446.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..259e5651e51ff92a7521246e4ed27e88edca21b0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/638fa6e5739015c4bb34eb4d0121982ad373fbdc5354756583417349ef715446.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6d313a2960d3bdc684383f311ca4a04af3c4b1319236f6eaa015c95ebdfe62 +size 14186 diff --git a/data/2025/2504_13xxx/2504.13173/images/64cf270ae5ece253cd40baa97d7402eff0854d77b5d2ddd359c436d288230b19.jpg b/data/2025/2504_13xxx/2504.13173/images/64cf270ae5ece253cd40baa97d7402eff0854d77b5d2ddd359c436d288230b19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43f119f42cc4b07913046d30abf9652676e436ee --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/64cf270ae5ece253cd40baa97d7402eff0854d77b5d2ddd359c436d288230b19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:901f2020dc3ac059fc1d16d9d603a5769cba182040ee9fd704071820df463ca2 +size 7247 diff --git a/data/2025/2504_13xxx/2504.13173/images/6705fc4535ad29e9958b9671d64c4ff7f26650bb2f5d9abbc0e3fca44b362dc5.jpg b/data/2025/2504_13xxx/2504.13173/images/6705fc4535ad29e9958b9671d64c4ff7f26650bb2f5d9abbc0e3fca44b362dc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f34303ee2926a840e9ff33ce40a39bacca1b817 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/6705fc4535ad29e9958b9671d64c4ff7f26650bb2f5d9abbc0e3fca44b362dc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae9559c47dd39d32dc86ea3e10eae4dc239f330f14cabfd8feb17aceebc8475e +size 4768 diff --git a/data/2025/2504_13xxx/2504.13173/images/680672e1480e5ce8fc52ca864b5c92b471c769c034455b7a0f12484f7a574c70.jpg b/data/2025/2504_13xxx/2504.13173/images/680672e1480e5ce8fc52ca864b5c92b471c769c034455b7a0f12484f7a574c70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6645ffb58fb215671e3f59e926e9d1993dad19a6 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13173/images/680672e1480e5ce8fc52ca864b5c92b471c769c034455b7a0f12484f7a574c70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bd294e92b8fa5dee5a94dd87702229880303c05cf636817e98dd23bcdbca592 +size 17075 diff --git a/data/2025/2504_13xxx/2504.13173/images/6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg b/data/2025/2504_13xxx/2504.13173/images/6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2269d15b8db132fc87bdeca1e14f2e91a3778c1b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4921c3f2c08cb8be3dbab3f444db05b4d67ed0d562729e74e991381006140de +size 44636 diff --git a/data/2025/2504_13xxx/2504.13173/images/6a5d6a5348855d190cec18bdb8b3711243d3a588e3e14914577dcdd59c7cd910.jpg b/data/2025/2504_13xxx/2504.13173/images/6a5d6a5348855d190cec18bdb8b3711243d3a588e3e14914577dcdd59c7cd910.jpg new file mode 100644 index 0000000000000000000000000000000000000000..002ac4b0c90643fd7bfa4577bc99c4fd1e27ff2e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/6a5d6a5348855d190cec18bdb8b3711243d3a588e3e14914577dcdd59c7cd910.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1977315c2c665a56124330469c278964a8f01f9e63028013fd44df713a6c05fe +size 11785 diff --git a/data/2025/2504_13xxx/2504.13173/images/6f6138ca1a100c7c01d4c5be0374fee01a58673f9ddfede85397b3d258447ee1.jpg b/data/2025/2504_13xxx/2504.13173/images/6f6138ca1a100c7c01d4c5be0374fee01a58673f9ddfede85397b3d258447ee1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a101c9b512a4a5c2f8601d126f90f9e7afb68e68 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/6f6138ca1a100c7c01d4c5be0374fee01a58673f9ddfede85397b3d258447ee1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ae9361897a436aed708a5d26d943f29e7ec213f5b52e13efd4037cf7d7eb7d4e +size 4822 diff --git a/data/2025/2504_13xxx/2504.13173/images/736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg b/data/2025/2504_13xxx/2504.13173/images/736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg new file mode 100644 index 0000000000000000000000000000000000000000..571f58eddf09696d405121bb3b10786d8e9057c1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1355de6b22f5cca368b3cd8c857ff36e19bf865291d832bad8e272859b8fd740 +size 30220 diff --git a/data/2025/2504_13xxx/2504.13173/images/74a4facb0826e1f05b0cb7f320e37e06f8cea5e9aedfe84d38e0340bed346b7f.jpg b/data/2025/2504_13xxx/2504.13173/images/74a4facb0826e1f05b0cb7f320e37e06f8cea5e9aedfe84d38e0340bed346b7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0019a34a85b958419a2c1fc08958741f072d8be --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/74a4facb0826e1f05b0cb7f320e37e06f8cea5e9aedfe84d38e0340bed346b7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:821970767ee647a527c9346cdc4056ad93d8eb0e5e46a96848abedd999e910f0 +size 7190 diff --git a/data/2025/2504_13xxx/2504.13173/images/75fe40f1a6a66feab69947ae112d64c78184fb16a456f9a46b288bf8efc32098.jpg b/data/2025/2504_13xxx/2504.13173/images/75fe40f1a6a66feab69947ae112d64c78184fb16a456f9a46b288bf8efc32098.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1db949915f213dac375b0c661b2cfeab5d4aba12 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/75fe40f1a6a66feab69947ae112d64c78184fb16a456f9a46b288bf8efc32098.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3b2dd555ec92b6365baa8671caa439297be4f3552a4027d2791ef400147ed3a +size 4408 diff --git 
a/data/2025/2504_13xxx/2504.13173/images/7f3725d584d1950412336fd521c5fe3fd51caa25f4aa98854ba39b2420f74a32.jpg b/data/2025/2504_13xxx/2504.13173/images/7f3725d584d1950412336fd521c5fe3fd51caa25f4aa98854ba39b2420f74a32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a530b8cecc8d4a19c9ad0a67ce21fd7fd22ab2f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/7f3725d584d1950412336fd521c5fe3fd51caa25f4aa98854ba39b2420f74a32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cb4a6fd8ad7304cb19e3ebbcf6d9b48fce505cfdf1158c0b85476ffd2faa7e6 +size 312312 diff --git a/data/2025/2504_13xxx/2504.13173/images/80a9a514a1cab65985fa34f572dcd4f24955f7607eb94f8a68c8f341579616f3.jpg b/data/2025/2504_13xxx/2504.13173/images/80a9a514a1cab65985fa34f572dcd4f24955f7607eb94f8a68c8f341579616f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5cca9448e12f1ffc3ac8c9640782ff79d4ba57f6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/80a9a514a1cab65985fa34f572dcd4f24955f7607eb94f8a68c8f341579616f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a10970480553f17590921f68eacda2becad106dcf3f450cd63cf2a14d1deab4f +size 3402 diff --git a/data/2025/2504_13xxx/2504.13173/images/82af14f2e0aaae35d69d79239be6a195272495eb0caac98230aecab4efe861aa.jpg b/data/2025/2504_13xxx/2504.13173/images/82af14f2e0aaae35d69d79239be6a195272495eb0caac98230aecab4efe861aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bde81eebf889c2ca4ee37d20d6ad39238eed983 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/82af14f2e0aaae35d69d79239be6a195272495eb0caac98230aecab4efe861aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d67672eed6eff6db5b570b320ffca3b8f233c307c3908a6c159ecdf47331f57 +size 3277 diff --git a/data/2025/2504_13xxx/2504.13173/images/85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg 
b/data/2025/2504_13xxx/2504.13173/images/85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..872b2f87becd439a71800b866d4ef1669b0c562f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7968b9a8414b3cd6be95c339c090f32c67b9d801044a3734a91e46b790f4182b +size 14862 diff --git a/data/2025/2504_13xxx/2504.13173/images/863b308888d95185d6eb78471e3445b0df9ff800e940976ad43e140062ce805a.jpg b/data/2025/2504_13xxx/2504.13173/images/863b308888d95185d6eb78471e3445b0df9ff800e940976ad43e140062ce805a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2035cd1c97384de61cb9e569627c211b60e183af --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/863b308888d95185d6eb78471e3445b0df9ff800e940976ad43e140062ce805a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:499fe4fa52407a62e1856a9dd90f720990faa79346daff8f671da09358dde3da +size 9594 diff --git a/data/2025/2504_13xxx/2504.13173/images/86e42072f55074973e119525ac0b4d70357b0c82a9dfab639385386274955ec6.jpg b/data/2025/2504_13xxx/2504.13173/images/86e42072f55074973e119525ac0b4d70357b0c82a9dfab639385386274955ec6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d12a1fbe6c65af0cdc706ec46c169541b87a308c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/86e42072f55074973e119525ac0b4d70357b0c82a9dfab639385386274955ec6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac701676b440a19e9d0f5d631ee47c6751671fe0e12b5216aed2e0739332ddcc +size 14073 diff --git a/data/2025/2504_13xxx/2504.13173/images/89af85ce7350411e6bbde1033f142e62f9faaf9dc2338bafb7405bd5b821d475.jpg b/data/2025/2504_13xxx/2504.13173/images/89af85ce7350411e6bbde1033f142e62f9faaf9dc2338bafb7405bd5b821d475.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3682335bc4a11f669b117f23eba7a1f547a58240 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/89af85ce7350411e6bbde1033f142e62f9faaf9dc2338bafb7405bd5b821d475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f81a70559d833af27e87c8766f941dda312d30556e259aef93b98101f2c2b56 +size 5085 diff --git a/data/2025/2504_13xxx/2504.13173/images/8a7042b492ca4691b3ec9e5526cdec1fc978947e64dbebf07fd6cb0322beef74.jpg b/data/2025/2504_13xxx/2504.13173/images/8a7042b492ca4691b3ec9e5526cdec1fc978947e64dbebf07fd6cb0322beef74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8af2af0e3e0c59060f26455671a5a078c4e61e7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/8a7042b492ca4691b3ec9e5526cdec1fc978947e64dbebf07fd6cb0322beef74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:177846e69c6af1f0882f6825b8829cb0fcc94b914c0dea6d8c12ede77cb506ed +size 5497 diff --git a/data/2025/2504_13xxx/2504.13173/images/8c7d5fcd845dc003fc2e069ac8ba0ebf252f221a003508ea6e48a2f885f5075c.jpg b/data/2025/2504_13xxx/2504.13173/images/8c7d5fcd845dc003fc2e069ac8ba0ebf252f221a003508ea6e48a2f885f5075c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f4a121288b737ae6764ef1dddef3ad8a46a8f34 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/8c7d5fcd845dc003fc2e069ac8ba0ebf252f221a003508ea6e48a2f885f5075c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23e38a970b3a53b19414b90ab7b6cf9f8bfbbb25057b03f50df76707fe4e8af7 +size 5420 diff --git a/data/2025/2504_13xxx/2504.13173/images/8d6bffb8df93750e04bdb7ff0be4079a679cc12726ca66158bd46cc629edf8c8.jpg b/data/2025/2504_13xxx/2504.13173/images/8d6bffb8df93750e04bdb7ff0be4079a679cc12726ca66158bd46cc629edf8c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85aceae81d94b6e63efec7bc3c8e51d1a0f36a62 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13173/images/8d6bffb8df93750e04bdb7ff0be4079a679cc12726ca66158bd46cc629edf8c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71d5855eefeeb217d8e86308d76e01de6cdeb3fb78b90d3b3207edb6156444d6 +size 9581 diff --git a/data/2025/2504_13xxx/2504.13173/images/95a973ed3778a085d3105eb1d94e3724ad2b85011a7faee71af9164273cd987f.jpg b/data/2025/2504_13xxx/2504.13173/images/95a973ed3778a085d3105eb1d94e3724ad2b85011a7faee71af9164273cd987f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31bdebf673847c7a7675beec9139b6edeb7d2f3e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/95a973ed3778a085d3105eb1d94e3724ad2b85011a7faee71af9164273cd987f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ecbc97e499cd9cc4ab08eba153c25649995d62bc0ab6ba9ffd03773b577ec8c +size 14063 diff --git a/data/2025/2504_13xxx/2504.13173/images/96cd3ce06fdc6a0b1aafc10c2092653a94c089dba9bd731877fe06a975cb8caa.jpg b/data/2025/2504_13xxx/2504.13173/images/96cd3ce06fdc6a0b1aafc10c2092653a94c089dba9bd731877fe06a975cb8caa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84b98dc8f411f84b8d2fd96744f62c10cf9c00c4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/96cd3ce06fdc6a0b1aafc10c2092653a94c089dba9bd731877fe06a975cb8caa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb68d3a707b6b2be1e4b23a74046175589ca3f57f827ffd9e13d5df3a887cc12 +size 5512 diff --git a/data/2025/2504_13xxx/2504.13173/images/96fd0586f8ff19b8dc64739aa8cfed0612ce2586170d8796b768374b8ce866f9.jpg b/data/2025/2504_13xxx/2504.13173/images/96fd0586f8ff19b8dc64739aa8cfed0612ce2586170d8796b768374b8ce866f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..471baec767fce49d2b1fa3626fc03bde852f6927 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/96fd0586f8ff19b8dc64739aa8cfed0612ce2586170d8796b768374b8ce866f9.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b366d132c5a005e7ca5da7869464c0e38da0b69dbffe52d13f85ece895ae65b0 +size 4976 diff --git a/data/2025/2504_13xxx/2504.13173/images/998a3dee3d52105145aa5ef25e50506e70a19e6a6ab4092054d174e0f9ddea34.jpg b/data/2025/2504_13xxx/2504.13173/images/998a3dee3d52105145aa5ef25e50506e70a19e6a6ab4092054d174e0f9ddea34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82e375ccabc6cbf8514dce725eacc3a481b2ba82 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/998a3dee3d52105145aa5ef25e50506e70a19e6a6ab4092054d174e0f9ddea34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12b4070a69d1952b75eff910be65efeb6ae89a9983e88f2a1287ecf3a42618b8 +size 3969 diff --git a/data/2025/2504_13xxx/2504.13173/images/9b1352fc7030eb3363cc479fcf394be2f1162bce2d62c322474217700ed85aa1.jpg b/data/2025/2504_13xxx/2504.13173/images/9b1352fc7030eb3363cc479fcf394be2f1162bce2d62c322474217700ed85aa1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6f990f4214eda680febc7345e0b5ded8d38b039 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/9b1352fc7030eb3363cc479fcf394be2f1162bce2d62c322474217700ed85aa1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8979320b1c2495978a8341be4ed95d3a98f37e3d8b414dea588e2e7fe0e39cd +size 6147 diff --git a/data/2025/2504_13xxx/2504.13173/images/9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg b/data/2025/2504_13xxx/2504.13173/images/9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b61143e29f442fbaefd867cd2cb4199f497ff4bd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a8db8a161358f385f5603252bef0ef8f2703da6ddc16d430ac5ac265fb1a42b +size 16862 diff --git 
a/data/2025/2504_13xxx/2504.13173/images/9d623c722a5ab850992a05508faa29932404f7de735834c9391b80064d8ea420.jpg b/data/2025/2504_13xxx/2504.13173/images/9d623c722a5ab850992a05508faa29932404f7de735834c9391b80064d8ea420.jpg new file mode 100644 index 0000000000000000000000000000000000000000..022629977143ae97596bf9aedeb8014b112db888 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/9d623c722a5ab850992a05508faa29932404f7de735834c9391b80064d8ea420.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8128d04752f6cb920dcbff25968f41b5ebfe69daf4cd932951676aacea2b9bbd +size 55467 diff --git a/data/2025/2504_13xxx/2504.13173/images/9ff03c9a38b84e17a1435eb7100dbd7d1ccc7926dca27c68ad8371b6f988ca36.jpg b/data/2025/2504_13xxx/2504.13173/images/9ff03c9a38b84e17a1435eb7100dbd7d1ccc7926dca27c68ad8371b6f988ca36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2c48847428712f4b8adcdc0acd2accc74e8d207 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/9ff03c9a38b84e17a1435eb7100dbd7d1ccc7926dca27c68ad8371b6f988ca36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8b2dbcf61bea416711aa382e624799d261abaac872c603a98be9de9690c0023 +size 11973 diff --git a/data/2025/2504_13xxx/2504.13173/images/a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg b/data/2025/2504_13xxx/2504.13173/images/a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a552291c9af5a53a59bfb5d4d4cd3c1cefad122 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09fbe806998841233f2b0e1236f8df2922e1b43a05bb54a0f1e622699e8ed615 +size 17829 diff --git a/data/2025/2504_13xxx/2504.13173/images/a0d90278e9ce354c647573ee69cc6ddc7ab30921351794fa19c10a84c33ff0aa.jpg 
b/data/2025/2504_13xxx/2504.13173/images/a0d90278e9ce354c647573ee69cc6ddc7ab30921351794fa19c10a84c33ff0aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92d48c1edc1b7be8cce6c15d887a7a4f56b5713f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/a0d90278e9ce354c647573ee69cc6ddc7ab30921351794fa19c10a84c33ff0aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b0ee1e99f775dd4a14a2d718d0576f2b632b37040131c9c3b2f75b17fa984bc +size 5767 diff --git a/data/2025/2504_13xxx/2504.13173/images/ac58b0d11caed157dc5cbac705d701d05881951bf5489d33502d9f44106b1f7d.jpg b/data/2025/2504_13xxx/2504.13173/images/ac58b0d11caed157dc5cbac705d701d05881951bf5489d33502d9f44106b1f7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fd06ca64b38268036cae6979ece6317b55cf370 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/ac58b0d11caed157dc5cbac705d701d05881951bf5489d33502d9f44106b1f7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3a149c42fd69b21465355ca53cf288e8ac0ab3f8495e91322c603c0b13eb1a +size 5092 diff --git a/data/2025/2504_13xxx/2504.13173/images/ae2ecbc4216e63040b842529ceb40ac1992903cfd2b1e8eaf5ee7be91e76f1e8.jpg b/data/2025/2504_13xxx/2504.13173/images/ae2ecbc4216e63040b842529ceb40ac1992903cfd2b1e8eaf5ee7be91e76f1e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be0fc1dd7130466f54cb605e3346648e39fb0120 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/ae2ecbc4216e63040b842529ceb40ac1992903cfd2b1e8eaf5ee7be91e76f1e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9eb4f761fd95a4d471fd7181da7a0a67a798e848dfceac7342e9db3177b7b1 +size 10853 diff --git a/data/2025/2504_13xxx/2504.13173/images/aedc967d779c770fa16e708083c25b3ec1849465db97daf9f15117ce24ee11ec.jpg b/data/2025/2504_13xxx/2504.13173/images/aedc967d779c770fa16e708083c25b3ec1849465db97daf9f15117ce24ee11ec.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..925d88348d79f84922983ecc459617a7fa93e777 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/aedc967d779c770fa16e708083c25b3ec1849465db97daf9f15117ce24ee11ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da65c1cdfc992053191417484bb64d5b6fbf2da19def0ef218d47849a09f8ec +size 13508 diff --git a/data/2025/2504_13xxx/2504.13173/images/b37c3f16fd8638dc8e885a6a7c29c0d3b1ed3f936a851fcf456b6c53d4ea685b.jpg b/data/2025/2504_13xxx/2504.13173/images/b37c3f16fd8638dc8e885a6a7c29c0d3b1ed3f936a851fcf456b6c53d4ea685b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..627f42f754244dd55a1e108d77e9e4c66877c8fe --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/b37c3f16fd8638dc8e885a6a7c29c0d3b1ed3f936a851fcf456b6c53d4ea685b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75532a3448a04e4dbb609858bdda44a73b42a027e96cf3d1ab53ea2508ed6e0a +size 5044 diff --git a/data/2025/2504_13xxx/2504.13173/images/b42089c29f66a2bb51edc81510e39c88cec6e022f3a409e89770c123329cfb28.jpg b/data/2025/2504_13xxx/2504.13173/images/b42089c29f66a2bb51edc81510e39c88cec6e022f3a409e89770c123329cfb28.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0598b7d36ce38cb12d741d43bd3b0378c5c566e7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/b42089c29f66a2bb51edc81510e39c88cec6e022f3a409e89770c123329cfb28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aa850c2ad6e6f5648de553fed279e1a5f541170bf9dd3c0df7ca25b9c86a90c +size 4923 diff --git a/data/2025/2504_13xxx/2504.13173/images/b5b7427b66217421eedcba20f2af4c82a7234c82d00f6d2aa5ab96e8051404f9.jpg b/data/2025/2504_13xxx/2504.13173/images/b5b7427b66217421eedcba20f2af4c82a7234c82d00f6d2aa5ab96e8051404f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afa31a629dbd0818d924e989250c7a7d79ca5f6a --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13173/images/b5b7427b66217421eedcba20f2af4c82a7234c82d00f6d2aa5ab96e8051404f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05247ce69050e66dd50537c331bbda43ef1e3527db16bf458d2ce3b22da324f9 +size 7836 diff --git a/data/2025/2504_13xxx/2504.13173/images/b974fbd3411b1dfe18a168b2543d52e5228e8e6e8b1b76ee1ccda75f87e0e0e1.jpg b/data/2025/2504_13xxx/2504.13173/images/b974fbd3411b1dfe18a168b2543d52e5228e8e6e8b1b76ee1ccda75f87e0e0e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b883e9bc1fe709ebe986eca131c6e1dbb0ed6ca4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/b974fbd3411b1dfe18a168b2543d52e5228e8e6e8b1b76ee1ccda75f87e0e0e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7a91191ec31f547ddeb62bee085c1e1bc5a78ceb2eacfd2e6d8b0baff0ea079 +size 4664 diff --git a/data/2025/2504_13xxx/2504.13173/images/ba50f238ce5f33290fa7048c1ffda6bc107d7d5d876a41aa2487a777d0ad0ae7.jpg b/data/2025/2504_13xxx/2504.13173/images/ba50f238ce5f33290fa7048c1ffda6bc107d7d5d876a41aa2487a777d0ad0ae7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc94c0d6e0fc6716c92705ce8400cbbdc9eb6f24 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/ba50f238ce5f33290fa7048c1ffda6bc107d7d5d876a41aa2487a777d0ad0ae7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bac7ac77072dc2ea9bab90556b4cf3a540de3fc1426adf82c4feab55e7138b3d +size 6172 diff --git a/data/2025/2504_13xxx/2504.13173/images/bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg b/data/2025/2504_13xxx/2504.13173/images/bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19601625229e580d4d199c6e003a71527280f3dc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6984340acdbc40ee1441e4ff53aa54d63edc2bbad3af4de643a9105f34fa77f1 +size 19211 diff --git a/data/2025/2504_13xxx/2504.13173/images/c2653b2e0de174c1d9cc2bb7d0c9c6da0cdcf0959473a141d4ed1459d62c3830.jpg b/data/2025/2504_13xxx/2504.13173/images/c2653b2e0de174c1d9cc2bb7d0c9c6da0cdcf0959473a141d4ed1459d62c3830.jpg new file mode 100644 index 0000000000000000000000000000000000000000..347781fc6f558f72f7ead20bac71a33b80b5e9b1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/c2653b2e0de174c1d9cc2bb7d0c9c6da0cdcf0959473a141d4ed1459d62c3830.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b102755481ab120797ef9d55562aa1ce2c506f660c2bf952a26e6551ac4bae22 +size 150234 diff --git a/data/2025/2504_13xxx/2504.13173/images/c52a53d3147a32bad3e24c86f4cf922d7462dfb8a43a44f7ab5bc53d1e1fcd3f.jpg b/data/2025/2504_13xxx/2504.13173/images/c52a53d3147a32bad3e24c86f4cf922d7462dfb8a43a44f7ab5bc53d1e1fcd3f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aab9addd083becb1c65afdbf2aa5aa1642ea033d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/c52a53d3147a32bad3e24c86f4cf922d7462dfb8a43a44f7ab5bc53d1e1fcd3f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f4a3d01010dbbed6ce1cbeeb711f49a527fa29df669bf96a4c0f482320d7d77 +size 6403 diff --git a/data/2025/2504_13xxx/2504.13173/images/c75e1ed65e4890c8f41a4677ad715405f0e110d55b7068f04d9b13a17c20dd7e.jpg b/data/2025/2504_13xxx/2504.13173/images/c75e1ed65e4890c8f41a4677ad715405f0e110d55b7068f04d9b13a17c20dd7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e24b63acd3ef7abf48dfedb27e1c5f8b4db2b2f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/c75e1ed65e4890c8f41a4677ad715405f0e110d55b7068f04d9b13a17c20dd7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe8922d3e397a4b6e8baac74c29122e5513c6b974346bef29034285d6aec7b57 +size 3574 diff --git 
a/data/2025/2504_13xxx/2504.13173/images/d60827df5b286083df57b389a9af0427c6ca7f2d218f37d8234aafba47e36b41.jpg b/data/2025/2504_13xxx/2504.13173/images/d60827df5b286083df57b389a9af0427c6ca7f2d218f37d8234aafba47e36b41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..525283c9db13e708867722dbe84aca3f741437f9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/d60827df5b286083df57b389a9af0427c6ca7f2d218f37d8234aafba47e36b41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d6360ae7c17a00b678f9aa18af34f6b7b5fef256c1c7b6d6062930efdd1c88 +size 6244 diff --git a/data/2025/2504_13xxx/2504.13173/images/d848497141f65c220dabb2ecd3e4dc8baf38a442a81f8389e5e292b1fbee3e99.jpg b/data/2025/2504_13xxx/2504.13173/images/d848497141f65c220dabb2ecd3e4dc8baf38a442a81f8389e5e292b1fbee3e99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe55235efce1e2624fd9671366bf2458d31f96ab --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/d848497141f65c220dabb2ecd3e4dc8baf38a442a81f8389e5e292b1fbee3e99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04e146971a15f409f694ff6809caa32025cf27ccb0824da274e75bbe2c449039 +size 13643 diff --git a/data/2025/2504_13xxx/2504.13173/images/deab8e4e67fb2160fcfcd6ec9a71fbf1da090b323fc2cfc874bb3bd89c14d04e.jpg b/data/2025/2504_13xxx/2504.13173/images/deab8e4e67fb2160fcfcd6ec9a71fbf1da090b323fc2cfc874bb3bd89c14d04e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd8ea2143008bba42039f1c533f18e9e2681ffd5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/deab8e4e67fb2160fcfcd6ec9a71fbf1da090b323fc2cfc874bb3bd89c14d04e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb0e3fffe33321e36240dc3259f586e554da417980cbb490a3190a22a1e69fa +size 5676 diff --git a/data/2025/2504_13xxx/2504.13173/images/e0124b6e871a145ca549691849d789cc253f7b1b9707c8ee69c9ce879be46b66.jpg 
b/data/2025/2504_13xxx/2504.13173/images/e0124b6e871a145ca549691849d789cc253f7b1b9707c8ee69c9ce879be46b66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab6f729dde61cc0d828f793a09e07974a64c9d10 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/e0124b6e871a145ca549691849d789cc253f7b1b9707c8ee69c9ce879be46b66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec5d42a72795a2c410ee40e3a7934bc06e969e0334fd23d8ab8acced80f05c22 +size 8657 diff --git a/data/2025/2504_13xxx/2504.13173/images/e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg b/data/2025/2504_13xxx/2504.13173/images/e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfaff01f81f6f7db3b656a0b581e74f5bc1c669e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d08f3f8a51c3d7a742450789ae1f824888ca4823ab1da9d84a5d01d7da2b307 +size 15272 diff --git a/data/2025/2504_13xxx/2504.13173/images/e1fa15a95e49443ca33904e34dd76f9eeae7b899d5a0d58b39de8ce306998dbd.jpg b/data/2025/2504_13xxx/2504.13173/images/e1fa15a95e49443ca33904e34dd76f9eeae7b899d5a0d58b39de8ce306998dbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0736672c5b19bec68f0877c37c11ee27af4efe2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/e1fa15a95e49443ca33904e34dd76f9eeae7b899d5a0d58b39de8ce306998dbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:630d503d8795c9f9d7d821cd741ccf859e8b4a5adf8238751bf5a53181749b2e +size 6614 diff --git a/data/2025/2504_13xxx/2504.13173/images/eedbb2a68d612ba06fdfdba58b8ba19dcd6af7d8c89ad2b80e6e9ffecb747f21.jpg b/data/2025/2504_13xxx/2504.13173/images/eedbb2a68d612ba06fdfdba58b8ba19dcd6af7d8c89ad2b80e6e9ffecb747f21.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6b055785278426e807b635690dea110ed6534261 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/eedbb2a68d612ba06fdfdba58b8ba19dcd6af7d8c89ad2b80e6e9ffecb747f21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4bceefcac69487d5ca75462eb84253080055d46601da1d7bd96762d2653bd2 +size 5505 diff --git a/data/2025/2504_13xxx/2504.13173/images/f01b51a49753ba3c681cb7abc4b98a1db893901f08685228a232863a3845858e.jpg b/data/2025/2504_13xxx/2504.13173/images/f01b51a49753ba3c681cb7abc4b98a1db893901f08685228a232863a3845858e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad9ecd4cd0f22132ab314787b74ee8870a4197d2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/f01b51a49753ba3c681cb7abc4b98a1db893901f08685228a232863a3845858e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b2ce8760f4c706d480934b12b0677dfb8cadd59243c25493f3cd2674e269632 +size 5698 diff --git a/data/2025/2504_13xxx/2504.13173/images/f5b314e89c68e81cb2f087136de86e716ba11e0e457e1752e6ffa962a3bddd22.jpg b/data/2025/2504_13xxx/2504.13173/images/f5b314e89c68e81cb2f087136de86e716ba11e0e457e1752e6ffa962a3bddd22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cd9156df27caf3ac448cf93133432d7261fd644 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/images/f5b314e89c68e81cb2f087136de86e716ba11e0e457e1752e6ffa962a3bddd22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5bb44c57701717b021bee0c9f8d58baede2a9fef7ebc61d3a2ccfd26994f6f8 +size 18101 diff --git a/data/2025/2504_13xxx/2504.13173/layout.json b/data/2025/2504_13xxx/2504.13173/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..343d54ace34c9e5eea8431b95e9d6da34f7b8660 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13173/layout.json @@ -0,0 +1,21841 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 87, + 109, + 545, + 150 + ], + "type": "title", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 109, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 87, + 109, + 545, + 150 + ], + "type": "text", + "content": "It’s All Connected: A Journey Through Test-Time Memorization, Attentional Bias, Retention, and Online Optimization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 138, + 170, + 492, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 170, + 492, + 186 + ], + "spans": [ + { + "bbox": [ + 138, + 170, + 492, + 186 + ], + "type": "text", + "content": "Ali Behrouz†, Meisam Razaviyayn†, Peilin Zhong†, and Vahab Mirrokni†" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 272, + 201, + 360, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 201, + 360, + 217 + ], + "spans": [ + { + "bbox": [ + 272, + 201, + 360, + 217 + ], + "type": "text", + "content": "Google Research" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 195, + 219, + 440, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 219, + 440, + 230 + ], + "spans": [ + { + "bbox": [ + 195, + 219, + 440, + 230 + ], + "type": "text", + "content": "{alibehrouz, Razaviyayn, peilinz, mirrokni}@google.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 297, + 255, + 334, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 255, + 334, + 266 + ], + "spans": [ + { + "bbox": [ + 297, + 255, + 334, + 266 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 93, + 271, + 538, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 271, + 538, + 447 + ], + "spans": [ + { + "bbox": [ + 93, + 271, + 538, + 447 + ], + "type": "text", + "content": "Designing efficient and effective architectural backbones has been in the core of research efforts to enhance the capability of foundation models. 
Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we reconceptualize neural architectures, including Transformers, Titans, and modern linear recurrent neural networks as associative memory modules that learn a mapping of keys and values using an internal objective, referred to as attentional bias. Surprisingly, we observed that most existing sequence models leverage either (1) dot-product similarity, or (2) " + }, + { + "bbox": [ + 93, + 271, + 538, + 447 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 93, + 271, + 538, + 447 + ], + "type": "text", + "content": " regression objectives as their attentional bias. Going beyond these objectives, we present a set of alternative attentional bias configurations along with their effective approximations to stabilize their training procedure. We then reinterpret forgetting mechanisms in modern deep learning architectures as a form of retention regularization, providing a novel set of forget gates for sequence models. Building upon these insights, we present MIRAS, a general framework to design deep learning architectures based on four choices of: (i) associative memory architecture, (ii) attentional bias objective, (iii) retention gate, and (iv) memory learning algorithm. We present three novel sequence models—MONETA, YAAD, and MEMORA—that go beyond the power of existing linear RNNs while maintaining a fast parallelizable training process. Our experiments show different design choices in MIRAS yield models with varying strengths. For example, certain instances of MIRAS achieve exceptional performance in special tasks such as language modeling, commonsense reasoning, and recall intensive tasks, even outperforming Transformers and other modern linear recurrent models." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 465, + 178, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 465, + 178, + 479 + ], + "spans": [ + { + "bbox": [ + 69, + 465, + 178, + 479 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 489, + 563, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 489, + 563, + 562 + ], + "spans": [ + { + "bbox": [ + 67, + 489, + 563, + 562 + ], + "type": "text", + "content": "Designing efficient architectural backbones for sequence modeling is a key to enhance the capability of foundation models in domains ranging from language (Behrouz et al. 2024c; Vaswani et al. 2017a) and computer vision (Dosovitskiy et al. 2020) to computational biology (Wang et al. 2024) and neuroscience (Behrouz et al. 2024a). While Transformers (Vaswani et al. 2017a), mainly due to their in-context learning and ability to learn at scale (Kaplan et al. 2020), have been firmly established as state-of-the-art (SOTA) models in sequence modeling, their quadratic time and space complexity limits their applicability in tasks that require long context modeling (Dalal et al. 2025; Li et al. 2024a; Liu et al. 2024b)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 567, + 564, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 567, + 564, + 675 + ], + "spans": [ + { + "bbox": [ + 67, + 567, + 564, + 675 + ], + "type": "text", + "content": "Recent efforts aim to overcome Transformer limitations in long-context modeling by designing efficient recurrent alternatives (Behrouz et al. 2024c; Neil et al. 2017; Smith et al. 2022). Unlike Transformer's linearly growing memory (i.e., the KV cache), these models compress the context into a fixed size memory, demanding improved memory management for comparable performance. 
To design more effective architectures, studies focus on improving memory capacity and its management by using/designing more expressive: (1) Learning rules: from Hebbian rule (Hebb 2005) to Delta rule (Neil et al. 2017); (2) Forget gates: from LSTM's (Schmidhuber et al. 1997) to Mamba2's (Dao et al. 2024) and then Titan's forget gates (Behrouz et al. 2024c); and (3) More expressive memory architectures: from vector-valued memory in RetNet (Sun et al. 2023) and LRU (Orvieto et al. 2023) to neural deep memory in Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 681, + 563, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 563, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 563, + 717 + ], + "type": "text", + "content": "At the core of these advancements lies a critical question: \"what is the underlying design framework behind these sequence models, and how can these models be enhanced?\" Taking inspiration from the broad definitions of associative memory and learning in neuropsychology literature (Okano et al. 
2000), several studies discuss the connection between Transformers" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.13173v1 [cs.LG] 17 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 563, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 563, + 124 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 563, + 124 + ], + "type": "text", + "content": "and (linear) Recurrent Neural Networks (RNNs) with associative memory (Bietti et al. 2023; Hopfield 1982; Ramsauer et al. 2021). These studies, however, either: (1) lack a universal explanation to fully illustrate the underlying learning algorithms, (2) are limited to a specific definition of associative memory and lack generalizability, and/or (3) are unable to describe standard, widely used components such as forget gate." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 132, + 564, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 132, + 564, + 242 + ], + "spans": [ + { + "bbox": [ + 67, + 132, + 564, + 242 + ], + "type": "text", + "content": "Contributions. 
Inspired by the human cognitive phenomenon of attentional bias—the natural tendency to prioritize certain events or stimuli—we re-conceptualize neural architectures, including Transformers, Titans, and other modern linear recurrent neural networks based on a broad definition of associative memory with attentional bias. We define and formalize the concept of attentional bias as the internal memory objective of sequence models (see Section 3) that aims to learn the underlying mapping between inputs (i.e., keys and values). Our formulation reveals that almost all existing sequence models are associative memories that leverage the same type of attentional bias. We reinterpret existing forgetting mechanisms in modern deep learning architectures as a form of retention " + }, + { + "bbox": [ + 67, + 132, + 564, + 242 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 132, + 564, + 242 + ], + "type": "text", + "content": "-regularization for the attentional bias, and then provide a novel set of alternative retention gates (forget gate) for sequence models, providing new insights on how to balance learning new concepts and the retention of previously learned concepts." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 245, + 564, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 245, + 564, + 294 + ], + "spans": [ + { + "bbox": [ + 68, + 245, + 564, + 294 + ], + "type": "text", + "content": "Building upon our formulation of memory and forget gate, we present MIRAs1, a fundamental framework to design novel sequence modeling architectures by four choice of: (1) Attentional bias (i.e., memory objective), (2) Retention gate, (3) Memory architecture, and (4) Memory learning algorithm (i.e., optimizer). We motivate and discuss several novel design choices, leading to novel architectures beyond existing sequence modeling architectures." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 299, + 564, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 564, + 361 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 564, + 361 + ], + "type": "text", + "content": "Finally, we focus on three novel variants of MIRAS-MONETA, YAAD, and MEMORA—that are based on attentional biases beyond simple " + }, + { + "bbox": [ + 68, + 299, + 564, + 361 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 299, + 564, + 361 + ], + "type": "text", + "content": "-regression objective as well as novel retention gating mechanisms that are more robust than existing ones. We further perform experimental evaluations of these three variants on language modeling, common-sense reasoning, needle-in-haystack, and recall intensive tasks. The results illustrate the superior performance of these variants, outperforming state-of-the-art sequence models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 369, + 564, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 369, + 564, + 468 + ], + "spans": [ + { + "bbox": [ + 68, + 369, + 564, + 468 + ], + "type": "text", + "content": "Roadmap. In Section 2, we review literature and discuss relevant concepts that we use through the paper. In Section 3, we present and discuss the broad definition of associative memory with formally defining the concept of attentional bias. We then discuss two viewpoints—Learning-Retaining and Follow-the-Regularized-Leader (FTRL)—to interpret sequence modeling through the lens of optimization and prove the generality of Learning-Retaining over FTRL. In Section 4, we present our MIRAS framework and discuss how it unifies modern sequence models. In Section 5, to show the potential of MIRAS framework, we discuss a variety of novel design choices for (1) attentional bias, and (2) retention gate (forget gate). 
Later in Section 5.3, we present three novel sequence models as the variants of MIRAS, and then discuss how to train them in a parallelizable manner. Finally, our experimental evaluations are reported in Section 6." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 483, + 294, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 483, + 294, + 499 + ], + "spans": [ + { + "bbox": [ + 69, + 483, + 294, + 499 + ], + "type": "text", + "content": "2 Preliminaries and Background" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 508, + 487, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 508, + 487, + 521 + ], + "spans": [ + { + "bbox": [ + 68, + 508, + 487, + 521 + ], + "type": "text", + "content": "In this section, we review the related studies and background concepts that we use through the paper." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "text", + "content": "Attention. Attention as the backbone of Transformers is a critical component that acts as their associative memory (Bietti et al. 2023). 
Given input " + }, + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}}" + }, + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "text", + "content": ", causal attention computes output " + }, + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "inline_equation", + "content": "y \\in \\mathbb{R}^{N \\times d_{\\mathrm{in}}}" + }, + { + "bbox": [ + 67, + 530, + 563, + 567 + ], + "type": "text", + "content": " based on Softmax over input dependent key, value, and query matrices:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 233, + 574, + 563, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 574, + 563, + 586 + ], + "spans": [ + { + "bbox": [ + 233, + 574, + 563, + 586 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = x \\mathbf {W} _ {\\mathrm {Q}}, \\quad \\mathbf {K} = x \\mathbf {W} _ {\\mathrm {K}}, \\quad \\mathbf {V} = x \\mathbf {W} _ {\\mathrm {V}}, \\tag {1}", + "image_path": "ac58b0d11caed157dc5cbac705d701d05881951bf5489d33502d9f44106b1f7d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 231, + 589, + 563, + 629 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 589, + 563, + 629 + ], + "spans": [ + { + "bbox": [ + 231, + 589, + 563, + 629 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {i} = \\sum_ {j = 1} ^ {i} \\frac {\\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {j} / \\sqrt {d _ {\\mathrm {i n}}}\\right) \\mathbf {v} _ {j}}{\\sum_ {\\ell = 1} ^ {i} \\exp \\left(\\mathbf {q} _ {i} ^ {\\top} \\mathbf {k} _ {\\ell} / \\sqrt {d _ {\\mathrm {i n}}}\\right)}, \\tag {2}", + "image_path": "863b308888d95185d6eb78471e3445b0df9ff800e940976ad43e140062ce805a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 637, + 563, + 
698 + ], + "spans": [ + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{Q}}, \\mathbf{W}_{\\mathrm{K}}" + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{V}} \\in \\mathbb{R}^{d_{\\mathrm{in}} \\times d_{\\mathrm{in}}}" + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "text", + "content": " are learnable parameters. While Transformers achieve significant improvements compared to traditional Recurrent Neural Networks (RNNs)—such as LSTM (Schmidhuber et al. 1997), their complexity that requires at least " + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "inline_equation", + "content": "N \\times d" + }, + { + "bbox": [ + 67, + 637, + 563, + 698 + ], + "type": "text", + "content": " operators to calculate the output has been the main motivation for researchers to think about alternative architectures. We divide and review the research efforts to design alternative architectures into two groups: (1) Linear shallow memory recurrent models, (2) Deep memory modules." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 703, + 563, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 703, + 563, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 703, + 563, + 723 + ], + "type": "text", + "content": "1 \"Miras\" is the translation of \"Legacy\" in several languages: such as Persian, Arabic, and Turkish. We choose this name since this framework provides clear steps for future studies to design powerful sequence models based on their task at hand." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 258, + 76, + 376, + 90 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 76, + 376, + 90 + ], + "spans": [ + { + "bbox": [ + 258, + 76, + 376, + 90 + ], + "type": "text", + "content": "Associative Memory" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 91, + 99, + 179, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 99, + 179, + 108 + ], + "spans": [ + { + "bbox": [ + 91, + 99, + 179, + 108 + ], + "type": "text", + "content": "Memory Architecture" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 91, + 115, + 179, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 115, + 179, + 131 + ], + "spans": [ + { + "bbox": [ + 91, + 115, + 179, + 131 + ], + "type": "text", + "content": "The neural architecture that stores memories." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 139, + 178, + 205 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 78, + 139, + 111, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 139, + 111, + 148 + ], + "spans": [ + { + "bbox": [ + 78, + 139, + 111, + 148 + ], + "type": "text", + "content": "1. 
Vector" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 156, + 111, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 156, + 111, + 163 + ], + "spans": [ + { + "bbox": [ + 78, + 156, + 111, + 163 + ], + "type": "text", + "content": "2.Matrix" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 171, + 178, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 171, + 178, + 181 + ], + "spans": [ + { + "bbox": [ + 78, + 171, + 178, + 181 + ], + "type": "text", + "content": "3. Multilayer Perceptron (MLP)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 188, + 144, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 188, + 144, + 205 + ], + "spans": [ + { + "bbox": [ + 78, + 188, + 144, + 205 + ], + "type": "text", + "content": "4. Memory Mosaics" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 222, + 99, + 288, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 99, + 288, + 108 + ], + "spans": [ + { + "bbox": [ + 222, + 99, + 288, + 108 + ], + "type": "text", + "content": "Attentional Bias" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 206, + 115, + 304, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 115, + 304, + 124 + ], + "spans": [ + { + "bbox": [ + 206, + 115, + 304, + 124 + ], + "type": "text", + "content": "The memory internal objective." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 196, + 138, + 279, + 205 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 196, + 138, + 270, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 138, + 270, + 148 + ], + "spans": [ + { + "bbox": [ + 196, + 138, + 270, + 148 + ], + "type": "text", + "content": "1. 
" + }, + { + "bbox": [ + 196, + 138, + 270, + 148 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 196, + 138, + 270, + 148 + ], + "type": "text", + "content": " Regression Loss" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 196, + 156, + 279, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 156, + 279, + 165 + ], + "spans": [ + { + "bbox": [ + 196, + 156, + 279, + 165 + ], + "type": "text", + "content": "2. Dot Product Similarity" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 196, + 171, + 246, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 171, + 246, + 180 + ], + "spans": [ + { + "bbox": [ + 196, + 171, + 246, + 180 + ], + "type": "text", + "content": "3. Huber Loss" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 196, + 188, + 256, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 188, + 256, + 205 + ], + "spans": [ + { + "bbox": [ + 196, + 188, + 256, + 205 + ], + "type": "text", + "content": "4. KL-Divergence" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 343, + 99, + 405, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 99, + 405, + 108 + ], + "spans": [ + { + "bbox": [ + 343, + 99, + 405, + 108 + ], + "type": "text", + "content": "Retention Gate" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 321, + 115, + 429, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 115, + 429, + 132 + ], + "spans": [ + { + "bbox": [ + 321, + 115, + 429, + 132 + ], + "type": "text", + "content": "The gate to retain the past state of the memory." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 139, + 433, + 198 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 317, + 139, + 433, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 139, + 433, + 148 + ], + "spans": [ + { + "bbox": [ + 317, + 139, + 433, + 148 + ], + "type": "text", + "content": "1. " + }, + { + "bbox": [ + 317, + 139, + 433, + 148 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 317, + 139, + 433, + 148 + ], + "type": "text", + "content": " Regularization (Local or Global)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 156, + 410, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 156, + 410, + 165 + ], + "spans": [ + { + "bbox": [ + 317, + 156, + 410, + 165 + ], + "type": "text", + "content": "2. Elastic Net Regularization" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 171, + 375, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 171, + 375, + 181 + ], + "spans": [ + { + "bbox": [ + 317, + 171, + 375, + 181 + ], + "type": "text", + "content": "3. KL Divergence" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 188, + 392, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 188, + 392, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 188, + 392, + 198 + ], + "type": "text", + "content": "4. 
Bregman Divergence" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 457, + 99, + 536, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 457, + 99, + 536, + 109 + ], + "spans": [ + { + "bbox": [ + 457, + 99, + 536, + 109 + ], + "type": "text", + "content": "Memory Algorithm" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 451, + 116, + 542, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 451, + 116, + 542, + 134 + ], + "spans": [ + { + "bbox": [ + 451, + 116, + 542, + 134 + ], + "type": "text", + "content": "The algorithm that learns the mapping." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 437, + 138, + 529, + 205 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 437, + 138, + 518, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 138, + 518, + 148 + ], + "spans": [ + { + "bbox": [ + 437, + 138, + 518, + 148 + ], + "type": "text", + "content": "1. Gradient Descent (GD)" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 437, + 155, + 511, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 155, + 511, + 164 + ], + "spans": [ + { + "bbox": [ + 437, + 155, + 511, + 164 + ], + "type": "text", + "content": "2. GD with Momentum" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 437, + 171, + 503, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 171, + 503, + 180 + ], + "spans": [ + { + "bbox": [ + 437, + 171, + 503, + 180 + ], + "type": "text", + "content": "3. Newton's Method" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 437, + 187, + 529, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 187, + 529, + 205 + ], + "spans": [ + { + "bbox": [ + 437, + 187, + 529, + 205 + ], + "type": "text", + "content": "4. Non-parametric Solutions ..." 
+ } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 71, + 228, + 561, + 318 + ], + "blocks": [ + { + "bbox": [ + 142, + 217, + 489, + 226 + ], + "lines": [ + { + "bbox": [ + 142, + 217, + 489, + 226 + ], + "spans": [ + { + "bbox": [ + 142, + 217, + 489, + 226 + ], + "type": "text", + "content": "Associative Memory is a neural network that learns to map keys to values based on an Attentional Bias objective." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 71, + 228, + 561, + 318 + ], + "lines": [ + { + "bbox": [ + 71, + 228, + 561, + 318 + ], + "spans": [ + { + "bbox": [ + 71, + 228, + 561, + 318 + ], + "type": "image", + "image_path": "6872269beb2c79cf3c74cfd6c217260d93c6b062ce5d1a78710cdba618c4541b.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 327, + 564, + 387 + ], + "lines": [ + { + "bbox": [ + 67, + 327, + 564, + 387 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 564, + 387 + ], + "type": "text", + "content": "Figure 1: The overview of MIRAS framework. MIRAS is based on four critical choices of (1) memory architecture, (2) attentional bias, (3) retention gate, and (4) memory learning algorithm. In this framework, the memory architecture determines the model capacity to memorize; attentional bias is responsible for modeling the underlying mapping patterns; retention gate determines how to balance learning new concepts and the retention of previously learned concepts; and memory learning algorithm is responsible for memory management." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "content": "(Linear) Recurrent Models. 
For many years, non-linear (gated) recurrent neural networks had been the de facto architectural backbones in deep learning (Greff et al. 2016). Their recurrent nature, however, results in non-parallelizable training, making their large scale training infeasible. To this end, in recent years, linear RNNs as alternatives to both Transformers and non-linear RNNs attract much attention mainly due to their parallelizable and linear-time training while maintaining competitive performance (Peng et al. 2025a; Sun et al. 2023; Yang et al. 2024c). Earlier variants of linear RNNs (De et al. 2024; Sun et al. 2023; Yang et al. 2024b), which mostly are based on Hebbian learning rule (Hebb 2005), aim to compress the data into their vector-valued (or matrix-valued) memory (De et al. 2024; Katharopoulos et al. 2020; Liu et al. 2024a; Sun et al. 2023; Yang et al. 2024b). Let " + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t \\in \\mathbb{R}^{d \\times n}" + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "content": " be the memory (" + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "content": " means vector-valued memory), and " + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "inline_equation", + "content": "\\mathbf{k}, \\mathbf{v} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "content": " are keys and values (i.e., projection of input " + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "inline_equation", + "content": "x_t \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 407, + 563, + 526 + ], + "type": "text", + "content": "), a simple general formulation for such linear RNNs can be written as:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 264, + 536, + 563, + 550 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 536, + 563, + 550 + ], + "spans": [ + { + "bbox": [ + 264, + 536, + 563, + 550 + ], + "type": "interline_equation", + "content": "\\mathcal {M} _ {t} = A _ {t} * \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}, \\tag {3}", + "image_path": "47c61bb3297e19291eef9fd18585bc1b7909eced6c0e4c2e60bf6bb6f112aa51.jpg" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "text", + "content": " is an arbitrary associative operator and " + }, + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 559, + 563, + 631 + ], + "type": "text", + "content": " is a data-(in)dependent diagonal matrix or a scalar (Yang et al. 2024c). Despite the efficiency that comes with the linear recurrent nature of these models, the memory can overflow mainly due to the additive (without replacement) nature of Hebbian learning rule, resulting in limited memory capacity and limited expressive power in in-context learning tasks. Moreover, the vector-valued memory of these architectures can limit their ability to learn/memorize large context window, mainly due to the limited expressive power of memory to learn the underlying patterns of data (Behrouz et al. 2024c; Sun et al. 2024)." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 67, + 637, + 564, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 637, + 564, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 637, + 564, + 721 + ], + "type": "text", + "content": "To address the above mentioned limitations, recurrent models that use a matrix-valued memory with Delta learning rule has gained popularity in recent years (Neil et al. 2017; Schlag et al. 2021; Yang et al. 2024c). Despite significant advantages, even these delta-rule-based recurrent models face theoretical limitations (Irie et al. 2023) with moderate performance in practice (Yang et al. 2024c). Recently, several studies aim to improve the performance of such models by adding scalar or channel-wise forget gate mechanisms (Peng et al. 2025b; Yang et al. 2024a), using negative eigenvalues (Grazzi et al. 2024), and multiple learning steps (Siems et al. 2025). They, however, still suffer from performance drop in long context, mainly due to the less expressive memory architectures (Behrouz et al. 2024c)." + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 140, + 561, + 397 + ], + "blocks": [ + { + "bbox": [ + 67, + 72, + 564, + 133 + ], + "lines": [ + { + "bbox": [ + 67, + 72, + 564, + 133 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 564, + 133 + ], + "type": "text", + "content": "Table 1: Overview of recent sequence models in MIRAS framework perspective. Surprisingly, all models are using the same type of attentional bias and regularization (forget gate). 
Note that these architectural choices does not uniquely identify the backbone as there are other design choices (e.g., input-dependency, channel-wise parameters, etc.) as well as the use of other components such as attention, convolutions, etc. Note that for attentional bias and retention gate, we are referring to the original design of MIRAS, discussed in Equation 4 and Remark 1." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 140, + 561, + 397 + ], + "lines": [ + { + "bbox": [ + 71, + 140, + 561, + 397 + ], + "spans": [ + { + "bbox": [ + 71, + 140, + 561, + 397 + ], + "type": "table", + "html": "
ModelMemory ArchitectureAttentional BiasRetention Gate†Memory AlgorithmMemory Write Operation
Shallow Memory
RetNet (2023)VectorDot-ProductL2GDMt=αMt-1+vtktT
Transformer (2017)MatrixL2-NonparametricMt=Mt-1∪{kt, vt}
LA (2021)MatrixDot-Product-GDMt=Mt-1+vtktT
DFWMatrixDot-ProductL2GDMt=(βtαT) ⊙ Mt-1+vtktT
Lightening Attention (2025)MatrixDot-ProductL2GDMt=αMt-1+vtktT
GLA (2024)MatrixDot-ProductL2GDMt=Diag(αt)Mt-1+vtktT
Mamba (2024)MatrixDot-ProductL2GDMt=αMt-1+vtktT
HGRN2 (2024)MatrixL1L2GDMt=Diag(αt)Mt-1+vt(1-αt)T
DeltaNet (2017)MatrixL2-GDMt=(I-βtktkT)Mt-1+βtvtktT
Longhorn (2024)MatrixL2-Implicit GDMt=(I-βtktkT)Mt-1+(βt1+ktkβt)xtkT
TTT-Linear (2024)MatrixL2-GDMt=Mt-1-η∇L(Mt-1, xt)
Gated DeltaNet (2024)MatrixL2L2GDMt=(αt(I-βtktkT))Mt-1+βtvtktT
RWKV-7 (2025)MatrixL2L2GDMt=diag(αt)(I-βtktkT)Mt-1+βtvtktT
DeltaProduct (2025)MatrixL2L2MGD*Mt=(αtΠi=1n(I-βt,ikt,i)T)Mt-1+Σj=1nΠi=j(I-βt,ivtj,kj,i)
Deep Memory
TTT-MLP (2024)2-layer MLPL2-GDMt=Mt-1-η∇L(Mt-1;kt, vt)
Titans-LMM (2024)k-layer MLPL2L2GD + MomentumMt=αMt-1-St, where St=ηSt-1-θt∇L(Mt-1;kt, vt)
MONETA (ours)2-layer MLPLpLqGDAt=AtA1-ηt∇lp(Wt-1;kt, vt), Wt=At/||At||q-2
YAAD (ours)2-layer MLPHuberL2GDWt=atWt-1-(ηt∇ε2(Wt-1;kt, vt) if ||M(kt)-vt|≤δt, ηtδt∇ε1(Wt-1;kt, vt) Otherwise.
MEMORA (ours)2-layer MLPL2KLGDWt=Softmax(αt log(Wt-1)-ηt∇ε2(Wt-1;kt, vt))
", + "image_path": "c2653b2e0de174c1d9cc2bb7d0c9c6da0cdcf0959473a141d4ed1459d62c3830.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 75, + 398, + 205, + 406 + ], + "lines": [ + { + "bbox": [ + 75, + 398, + 205, + 406 + ], + "spans": [ + { + "bbox": [ + 75, + 398, + 205, + 406 + ], + "type": "text", + "content": "* is using multiple rounds of GD per token." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 75, + 406, + 545, + 415 + ], + "lines": [ + { + "bbox": [ + 75, + 406, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 75, + 406, + 545, + 415 + ], + "type": "text", + "content": "For the sake of clarity, we use L2 for all modified L2-like regularizations. However, in fact, only Titans and RWKV-7 are using L2 retention gate (see Section 4)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 437, + 563, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 437, + 563, + 570 + ], + "spans": [ + { + "bbox": [ + 67, + 437, + 563, + 570 + ], + "type": "text", + "content": "Deep Memory Module: Titans and Test Time Training. To overcome the limited memory and to extend the effective context length of deep sequence models, more recent studies focus on a new generation of architectures with deep memory module (Behrouz et al. 2024c; Sun et al. 2024). These architectures are built on the meta-learning perspective, where the memory is an MLP architecture that is updated using gradient descent (with momentum) (Behrouz et al. 2024c; Sun et al. 2024). Sun et al. (2024) further provide a unifying perspective that how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models. Recently, in a concurrent work to ours, Wang et al. 
(2025) show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss. It, however, still remains unanswered that \"What is the underlying design framework behind these sequence models that can accurately unify existing architectures?\" Moreover, the role of forget gates and its alternative choices in modern sequence models is surprisingly less explored." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 586, + 440, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 440, + 603 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 440, + 603 + ], + "type": "text", + "content": "3 Associative Memory, Attentional Bias, and Retention" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "text", + "content": "Associative memory, which is an inseparable component of learning in humans (Terry 2017), has been the inspiration for many artificial neural architectures in the literature (Behrouz et al. 2024c; Hopfield 1982; Neil et al. 2017). These studies, however, define instances of the concept of associative memory, limiting the architecture to a specific class of similarity metrics between entities (i.e., keys and values). 
That is, broadly speaking, associative memory is an operator that maps a set of keys " + }, + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "text", + "content": " to a set of values " + }, + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 67, + 611, + 563, + 685 + ], + "type": "text", + "content": ", and so to learn the underlying mapping patterns in data, it requires an objective that targets a type of memory and measures the quality of learned mappings:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "content": "Definition 3.1 (Associative Memory and Attentional Bias). Given a set of keys " + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{K} \\subseteq \\mathbb{R}^{d_k}" + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "content": " and values " + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{V} \\subseteq \\mathbb{R}^{d_o}" + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "content": ", associative memory is an operator " + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{M}: \\mathcal{K} \\to \\mathcal{V}" + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "content": ". 
Learning the mapping of associative memory is based on an objective " + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 689, + 563, + 715 + ], + "type": "text", + "content": ", called" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 320, + 751 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 464, + 86 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 464, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 464, + 86 + ], + "type": "text", + "content": "Attentional Bias, that determines the type of memory and its tendency to prioritize some events:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 249, + 95, + 563, + 112 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 95, + 563, + 112 + ], + "spans": [ + { + "bbox": [ + 249, + 95, + 563, + 112 + ], + "type": "interline_equation", + "content": "\\mathcal {M} ^ {*} = \\arg \\min _ {\\mathcal {M}} \\quad \\mathcal {L} (\\mathcal {M} (\\mathcal {K}); \\mathcal {V}). 
\\tag {4}", + "image_path": "6705fc4535ad29e9958b9671d64c4ff7f26650bb2f5d9abbc0e3fca44b362dc5.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 127, + 186, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 127, + 186, + 138 + ], + "spans": [ + { + "bbox": [ + 69, + 127, + 186, + 138 + ], + "type": "text", + "content": "A few remarks are in order:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "spans": [ + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "content": "Remark 1. When we parameterize the memory with parameter " + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "content": ", we use " + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(W, \\mathbf{k})" + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "content": ". In this parametric setting, the optimization problem in (4) should be performed over the parameter " + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "content": ". Furthermore, in the parametric setup, we might use an additional regularization " + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(W)" + }, + { + "bbox": [ + 68, + 145, + 563, + 182 + ], + "type": "text", + "content": " to control the retaining of the past data." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 187, + 564, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 187, + 564, + 235 + ], + "spans": [ + { + "bbox": [ + 68, + 187, + 564, + 235 + ], + "type": "text", + "content": "Remark 2. Learning the mapping between keys and values (Equation 4) is a meta-learning problem, in which the attentional bias is optimized in the inner-loop and all other parameters of the neural network (e.g., linear projections, convolutions, etc.) are optimized in the outer-loop. Therefore, the model learns how to store the data into its parameters at test time (Behrouz et al. 2024c; Sun et al. 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 250, + 484, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 250, + 484, + 264 + ], + "spans": [ + { + "bbox": [ + 68, + 250, + 484, + 264 + ], + "type": "text", + "content": "3.1 Learning to Memorize and to Retain Through the Lens of Optimization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 269, + 563, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 269, + 563, + 305 + ], + "spans": [ + { + "bbox": [ + 68, + 269, + 563, + 305 + ], + "type": "text", + "content": "Definition 3.1 translates the design of a neural architecture based on the concept of associative memory to learning the underlying mapping between keys and values, by minimizing an objective " + }, + { + "bbox": [ + 68, + 269, + 563, + 305 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 68, + 269, + 563, + 305 + ], + "type": "text", + "content": ". To optimize Equation 4, one simple approach is to utilize the idea of gradient descent. 
Specifically, given a new pair of keys and values, we update the memory as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 250, + 315, + 563, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 315, + 563, + 328 + ], + "spans": [ + { + "bbox": [ + 250, + 315, + 563, + 328 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\tag {5}", + "image_path": "2f01d768348f0ff12ac114dbd564983014fcd1732e75507df4d89a282f291b22.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 337, + 563, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 337, + 563, + 397 + ], + "spans": [ + { + "bbox": [ + 68, + 337, + 563, + 397 + ], + "type": "text", + "content": "where, for simplicity, we use the definition " + }, + { + "bbox": [ + 68, + 337, + 563, + 397 + ], + "type": "inline_equation", + "content": "\\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\coloneqq \\mathcal{L}(\\mathcal{M}(W; \\mathbf{k}_t), \\mathbf{v}_t)" + }, + { + "bbox": [ + 68, + 337, + 563, + 397 + ], + "type": "text", + "content": ". Behrouz et al. (2024c) re-interpret the formulation as a momentary surprise metric, where the model memorizes tokens that violates the expectation of the objective (i.e., being surprising to the memory). 
Although the choice of objective is an important step to fully interpret Equation 5 (which we discuss in detail in Section 5), there are different viewpoints to interpret this update rule in its general format, which later can help us to go beyond existing architectures:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 411, + 469, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 411, + 469, + 426 + ], + "spans": [ + { + "bbox": [ + 68, + 411, + 469, + 426 + ], + "type": "text", + "content": "3.2 Viewpoint 1: Online Regression and Follow-The-Regularized-Leader" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 431, + 499, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 431, + 499, + 443 + ], + "spans": [ + { + "bbox": [ + 68, + 431, + 499, + 443 + ], + "type": "text", + "content": "Equation (5) can be viewed as one step of online gradient descent over the sequence of the loss functions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 221, + 453, + 563, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 453, + 563, + 466 + ], + "spans": [ + { + "bbox": [ + 221, + 453, + 563, + 466 + ], + "type": "interline_equation", + "content": "\\ell \\left(W; \\mathbf {k} _ {1}, \\mathbf {v} _ {1}\\right), \\ell \\left(W; \\mathbf {k} _ {2}, \\mathbf {v} _ {2}\\right), \\dots , \\ell \\left(W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\dots . 
\\tag {6}", + "image_path": "d60827df5b286083df57b389a9af0427c6ca7f2d218f37d8234aafba47e36b41.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 475, + 563, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 475, + 563, + 511 + ], + "spans": [ + { + "bbox": [ + 68, + 475, + 563, + 511 + ], + "type": "text", + "content": "It is well known that the online gradient descent can be viewed as a special case of Follow-The-Regularized-Leader (FTRL) algorithm with a special choice of loss functions (Shalev-Shwartz et al. 2012, Chapter 2) and (Hazan et al. 2016). Specifically, assuming " + }, + { + "bbox": [ + 68, + 475, + 563, + 511 + ], + "type": "inline_equation", + "content": "W_0 = 0" + }, + { + "bbox": [ + 68, + 475, + 563, + 511 + ], + "type": "text", + "content": ", the update rule in (5) is equivalent to" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 194, + 521, + 563, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 521, + 563, + 552 + ], + "spans": [ + { + "bbox": [ + 194, + 521, + 563, + 552 + ], + "type": "interline_equation", + "content": "W _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\left\\langle W - W _ {i - 1}, \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) \\right\\rangle + \\frac {1}{2 \\eta} \\| W \\| _ {2} ^ {2}, \\tag {7}", + "image_path": "49d2d680aea288aa49db17399e5b37ca1e6308d3becf19d8f89c0a193794abf2.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "spans": [ + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "text", + "content": "where the term " + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "inline_equation", + "content": "\\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle" + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + 
], + "type": "text", + "content": " is the local linear approximation of the original loss at time " + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "text", + "content": " and the second term is a regularization term. While the first part " + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{t} \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; \\mathbf{k}_i, \\mathbf{v}_i) \\rangle" + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "text", + "content": " measures how well can the memory learn all the past tokens, the second term " + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "inline_equation", + "content": "\\frac{1}{2\\eta} \\|W\\|_2^2" + }, + { + "bbox": [ + 68, + 559, + 562, + 597 + ], + "type": "text", + "content": " penalizes the memory update with respect to the size of memory." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 602, + 564, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 602, + 564, + 662 + ], + "spans": [ + { + "bbox": [ + 68, + 602, + 564, + 662 + ], + "type": "text", + "content": "Equation (7) uses linear approximation of the loss function and quadratic regularization. We can, however, in principle use other approximations of the loss function as well as other regularization functions, as used in the past in online optimization (Hazan et al. 2016; Shalev-Shwartz et al. 2012) or in general optimization (Miral 2015; Razaviyayn et al. 2013). Such changes are the idea behind the development of other optimization algorithms such mirror descent. 
More specifically, we can generalize the update rule in (7) to the form:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 214, + 677, + 563, + 726 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 677, + 563, + 726 + ], + "spans": [ + { + "bbox": [ + 214, + 677, + 563, + 726 + ], + "type": "interline_equation", + "content": "W _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W ; \\mathbf {k} _ {i} , \\mathbf {v} _ {i})} _ {\\text {A t t e n t i o n a l B i a s}} + \\underbrace {\\frac {1}{\\eta_ {t}} \\mathcal {R} _ {t} (W)} _ {\\text {M e m o r y S t a b i l i t y}}. \\tag {FTRLViewpoint}", + "image_path": "348a7fefd6264ac140f0fb64042a7134f5cda5eaa7d2dc9808f2ac8dfc62ec0e.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": "In this update rule, the term " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{t} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i)" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " aims at memorizing the tokens at test time, while the term " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(W)" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " regularizes the 
learning dynamics and take the size of the memory into account when updating it by a new incoming data. Choosing different loss functions " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\widehat{\\ell}_i(W; x_i)" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " and the regularization term " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\frac{1}{\\eta_t} \\mathcal{R}_t(W)" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " can lead to different algorithms such as (online) gradient descent or mirror descent. In this generalization, " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\eta_t" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " to can be data-dependent. Moreover, we will allow imposing constraint " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": " on the choice " + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 72, + 563, + 136 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 148, + 526, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 148, + 526, + 163 + ], + "spans": [ + { + "bbox": [ + 68, + 148, + 526, + 163 + ], + "type": "text", + "content": "3.3 Viewpoint 2: Learning the Latest Token While Retaining Previous Information" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "text", + "content": "Another way to interpret the update rule (5) is to view it as learning from the latest key-value pair " + }, + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "inline_equation", + "content": "(\\mathbf{k}_i, \\mathbf{v}_i)" + }, + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "text", + "content": " (via using its gradient or surprise metric), while staying close to the previous state " + }, + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "inline_equation", + "content": "W_{t-1}" + }, + { + "bbox": [ + 67, + 168, + 563, + 205 + ], + "type": "text", + "content": " to retain the previously memorized tokens. 
Formally, (5) is equivalent to" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 184, + 213, + 447, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 213, + 447, + 237 + ], + "spans": [ + { + "bbox": [ + 184, + 213, + 447, + 237 + ], + "type": "interline_equation", + "content": "W _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\right\\rangle + \\frac {1}{2 \\eta_ {t}} \\left\\| W - W _ {t - 1} \\right\\| _ {2} ^ {2}", + "image_path": "b5b7427b66217421eedcba20f2af4c82a7234c82d00f6d2aa5ab96e8051404f9.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "text", + "content": "The first term locally approximates " + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "inline_equation", + "content": "\\ell(W; \\mathbf{k}_t, \\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "text", + "content": " around the previous state " + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "inline_equation", + "content": "W_{t-1}" + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "text", + "content": ", while the last term regularizes deviations from " + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "inline_equation", + "content": "W_{t-1}" + }, + { + "bbox": [ + 67, + 246, + 562, + 270 + ], + "type": "text", + "content": ". 
This form can generalize to" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 296, + 563, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 296, + 563, + 328 + ], + "spans": [ + { + "bbox": [ + 156, + 296, + 563, + 328 + ], + "type": "interline_equation", + "content": "W _ {t} = \\arg \\min _ {W \\in \\mathcal {W}} \\underbrace {\\widetilde {\\ell_ {t}} (W ; \\mathbf {k} _ {t} , \\mathbf {v} _ {t})} _ {\\text {A t t e n t i o n a l B i a s}} + \\underbrace {\\operatorname {R e t} _ {t} (W , W _ {t - 1})} _ {\\text {R e t e n t i o n}}, \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\text {(L e a r n i n g - R e t a i n i n g V i e w p o i n t)}", + "image_path": "95a973ed3778a085d3105eb1d94e3724ad2b85011a7faee71af9164273cd987f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": "where the term " + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": " is an approximation of " + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "inline_equation", + "content": "\\ell (W;\\mathbf{k}_t,\\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": " and minimizing it corresponds to Learning from the new concepts " + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "inline_equation", + "content": "(\\mathbf{k}_t,\\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": ". 
The second term " + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(W,W_{t - 1})" + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": " regularizes the changes in " + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 336, + 563, + 373 + ], + "type": "text", + "content": " to make the learning dynamics stable and to retain previously learned knowledge. This Retention function may have local and global components:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 214, + 381, + 416, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 381, + 416, + 425 + ], + "spans": [ + { + "bbox": [ + 214, + 381, + 416, + 425 + ], + "type": "interline_equation", + "content": "\\operatorname {R e t} _ {t} \\left(W, W _ {t - 1}\\right) = \\underbrace {\\frac {1}{\\eta_ {t}} \\mathrm {D} _ {t} \\left(W , W _ {t - 1}\\right)} _ {\\text {L o c a l R e t e n t i o n}} + \\underbrace {\\frac {1}{\\alpha_ {t}} \\mathrm {G} _ {t} \\left(W\\right)} _ {\\text {G l o b a l R e t e n t i o n}}.", + "image_path": "217599465fb61359aa527b59a4521cfa484c2814e47cefdea51dc2e3e13a3ff6.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "text", + "content": "Here, the term " + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t(W, W_{t-1})" + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "text", + "content": ", which is a premetric that controls the deviations from " + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "inline_equation", + "content": "W_{t-1}" + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": 
"text", + "content": ", aims at retaining previously learned knowledge. The coefficient " + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "inline_equation", + "content": "\\eta_t" + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "text", + "content": " can be viewed as a meta in-context learning rate, where larger values of " + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "inline_equation", + "content": "\\eta_t" + }, + { + "bbox": [ + 67, + 432, + 563, + 504 + ], + "type": "text", + "content": " leads to learning more from new concepts, while allowing higher forgetting of previously learned concepts. The second term is a global retention that controls the change of the memory with respect to its size. The special instances of the above viewpoint (e.g., without global retention, with implicit closed-form solution, and/or with limited memory structure) have been the motivation behind some of the recent studies such as Liu et al. (2024a)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 518, + 335, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 518, + 335, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 518, + 335, + 533 + ], + "type": "text", + "content": "3.4 Further Discussions on the Two Viewpoints" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 538, + 563, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 538, + 563, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 538, + 563, + 597 + ], + "type": "text", + "content": "The (FTRL Viewpoint) and (Learning-Retaining Viewpoint) are connected through the lens of online optimization. For example, as discussed above, by choosing linear approximation of the loss and quadratic regularization/retention, they can both cover online gradient descent update in (5) as a special case. 
One straightforward way to make the connection explicit is by defining the premetric " + }, + { + "bbox": [ + 67, + 538, + 563, + 597 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t(W;W^{\\prime})" + }, + { + "bbox": [ + 67, + 538, + 563, + 597 + ], + "type": "text", + "content": " based on the previous loss functions and the regularization, as described in Proposition 3.2 below:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "spans": [ + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": "Proposition 3.2. Let " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\eta_t = \\eta" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " and define " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": ". 
Assume " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{W} = \\mathbb{R}^d" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " and the function " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "h_t(W)" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " is strictly convex in " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " and let " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_h(\\cdot, \\cdot)" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " be the Bregman divergence defined by function " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "h(\\cdot)" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": ". Set " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "Ret_t(W, W') = \\mathcal{D}_h(W, W')" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)" + }, + { + "bbox": [ + 68, + 603, + 563, + 654 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 658, + 563, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 658, + 563, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 658, + 563, + 706 + ], + "type": "text", + "content": "We provide the proof in Appendix B. The above proposition shows that (Learning-Retaining Viewpoint) can also explain the approaches obtained by (FTRL Viewpoint), under some mild assumptions. Hence, (Learning-Retaining Viewpoint) may be seen as a more general version. This is why we focus on this viewpoint in most of our derivations in the next sections." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 319, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 751 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 751 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 564, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 564, + 134 + ], + "type": "text", + "content": "Remark 3. Given the above viewpoint, we can see that even by using additional global regularization there is no memory erasing or forgetting process (a common term in modern architectures (Behrouz et al. 2024c; Yang et al. 2024a)) but the model might decide to not retain the past state of the memory. Interestingly, this observation also matches the human memory process, where brain does not erase memories but they might become inaccessible due to retrieval failures (Robertson 2002). Therefore, instead of calling it a forget gate, later on, we use \"Retention Gate\" to refer to this term." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 139, + 564, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 139, + 564, + 212 + ], + "spans": [ + { + "bbox": [ + 68, + 139, + 564, + 212 + ], + "type": "text", + "content": "Remark 4. As we discuss in Section 4 and summarize in Table 1, most existing modern sequence models are optimizing associative memory objective (attentional bias in Equation 4) using gradient descent. Therefore, to provide further intuition about the connection of existing sequence models as well as their online learning interpretations, we discuss the above two viewpoints that are limited to gradient descent-based update rules. Our initial definition of attentional bias and associative memory in Equation 4, however, is broader and can be optimized by any optimization algorithm (e.g., even Newton's method, or non-parametric solutions)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 228, + 529, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 228, + 529, + 245 + ], + "spans": [ + { + "bbox": [ + 68, + 228, + 529, + 245 + ], + "type": "text", + "content": "4 MirAs: Learning to Memorize with Robust and Expressive Memory" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "spans": [ + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "text", + "content": "Building upon our definition of associative memory, attentional bias, and previous viewpoints, we present MIRAs framework that not only accurately unifies existing backbone architectures but it also provides insights on how to design the next generation of sequence models. As discussed earlier in Section 3, learning an associative memory can be interpreted as a meta-learning task, in which the associative memory learns how to compress and store data into its parameters at test time. 
The architecture of the memory in such tasks is particularly important as in longer contexts, the expressivity of the memory structure can limit its ability to learn the underlying patterns. Therefore, the first choice to design a sequence model is the structure of the memory. Given the structure of the memory, parameterized by a set of parameters " + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "text", + "content": ", as discussed earlier, we aim to minimize a loss function " + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "inline_equation", + "content": "\\ell(W; \\cdot, \\cdot)" + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "text", + "content": " with a retention regularizer " + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}(\\cdot)" + }, + { + "bbox": [ + 68, + 253, + 564, + 361 + ], + "type": "text", + "content": " via a learning algorithm (e.g., gradient descent). Accordingly, MIRAs requires four design choices:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 366, + 564, + 528 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "spans": [ + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "text", + "content": "1. Memory Structure: This choice specifies the architecture of the memory. For example, this architecture can be a vector, a linear function, a Multilayer Perceptron (MLP) layer, or even more complex structures. 
We may restrict the choice of " + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "text", + "content": " to be within a certain region, e.g., " + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "text", + "content": " to lie within an " + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 72, + 366, + 563, + 403 + ], + "type": "text", + "content": " ball to avoid infinite values or unstable training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "spans": [ + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "text", + "content": "2. Attentional Bias: A key choice is the attentional bias objective " + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "text", + "content": " in Equation 4. We can even consider different approximations of the loss function, (e.g., " + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "inline_equation", + "content": "\\widehat{\\ell} (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "text", + "content": " in (FTRL Viewpoint) or " + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell} (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 72, + 408, + 564, + 445 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint)). The choice of attentional bias determines how memory memorizes the context, maps the inputs, and prioritizes the events." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "spans": [ + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "text", + "content": "3. Memory Stability and Retention: Another key choice is the retention regularizer " + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(\\cdot)" + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "text", + "content": " in (FTRL Viewpoint) and " + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(\\cdot)" + }, + { + "bbox": [ + 72, + 450, + 564, + 487 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint)). In parametric setups, this choice balances learning with retention of past state. An effective retention gate is key to the good performance in long context tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 492, + 563, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 492, + 563, + 528 + ], + "spans": [ + { + "bbox": [ + 72, + 492, + 563, + 528 + ], + "type": "text", + "content": "4. Memory Algorithm: Finally, this choice specifies the learning algorithm that we use to optimize the memory objective. One may use gradient descent, gradient descent with momentum, or any other algorithm (including finding non-parametric solutions)." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 533, + 563, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 533, + 563, + 582 + ], + "spans": [ + { + "bbox": [ + 68, + 533, + 563, + 582 + ], + "type": "text", + "content": "The above choices are major design choices for designing backbone sequence models in neural architectures. There are, however, minor decisions that can distinguish models; i.e., data-dependent or independent parameters, scalar or channel-wise learning rate/retaining gate, etc. Next, we discuss the overview of how existing architectures fit into MIRAS framework." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "spans": [ + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "content": "RNNs with Hebbian Rule. The first generation of modern recurrent architectures (e.g., Linear attention (Katharopoulos et al. 2020), RetNet (Sun et al. 2023), Mamba (Gu et al. 2024), and GLA (Yang et al. 2024b)) are based on Hebbian-like (e.g., gated Hebbian) learning rule (Hebb 2005). We let attentional bias be the dot product similarity. 
That is, given a memory " + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{d \\times n}" + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "inline_equation", + "content": "\\mathbf{k}, \\mathbf{v} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "inline_equation", + "content": "\\tilde{\\ell}_t \\coloneqq -2\\langle \\mathcal{M}_t \\mathbf{k}_t, \\mathbf{v}_t \\rangle" + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "content": " and local retention as " + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(\\mathcal{M}, \\mathcal{M}_{t-1}) = \\| \\mathcal{M}_t - \\alpha \\mathcal{M}_{t-1} \\|_F^2" + }, + { + "bbox": [ + 68, + 592, + 563, + 664 + ], + "type": "text", + "content": ". Using Equation Learning-Retaining Viewpoint and gradient descent as the optimizer (i.e., memory learning algorithm), the memory update rule is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 270, + 669, + 563, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 669, + 563, + 683 + ], + "spans": [ + { + "bbox": [ + 270, + 669, + 563, + 683 + ], + "type": "interline_equation", + "content": "\\mathcal {M} _ {t} = \\alpha \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}. 
\\tag {8}", + "image_path": "c75e1ed65e4890c8f41a4677ad715405f0e110d55b7068f04d9b13a17c20dd7e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "spans": [ + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": "When (1) " + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": ", memory update is equivalent to Linear Attention (LA) (Katharopoulos et al. 2020); (2) " + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": " is a learnable parameter, resulting architecture is either lightening attention (" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "inline_equation", + "content": "n > 1" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": ") (Li et al. 2025) or RetNet (" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": ") (Sun et al. 2023); and (3) " + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\alpha_{t} \\in \\mathbb{R}" + }, + { + "bbox": [ + 68, + 687, + 563, + 724 + ], + "type": "text", + "content": " are data-dependent learnable parameters, resulting sequence model is Mamba2 (Dao et al. 2024)." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "content": "RNNs with Delta Rule. To improve the memory management and to enhance the memory capacity of the above group, several studies suggest using delta rule (Neil et al. 2017; Schlag et al. 2021) as the learning algorithm in recurrent neural networks (e.g., DeltaNet (Schlag et al. 2021), Longhorn (Liu et al. 2024a), and RWKV7 (Peng et al. 2025b)). In this part, we recall that where " + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{d \\times n}" + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "content": ", delta rule is equivalent to optimizing MSE objective " + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{M}_t \\mathbf{k}_t - \\mathbf{v}_t \\|_2^2" + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(\\mathcal{M}, \\mathcal{M}_{t-1}) = \\| \\mathcal{M}_t - \\alpha \\mathcal{M}_{t-1} \\|_F^2" + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "content": " as local retention, and stochastic gradient descent as optimizer: (" + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "inline_equation", + 
"content": "\\eta_t" + }, + { + "bbox": [ + 68, + 73, + 563, + 145 + ], + "type": "text", + "content": " is defined in Equation Learning-Retaining Viewpoint)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 243, + 154, + 563, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 154, + 563, + 168 + ], + "spans": [ + { + "bbox": [ + 243, + 154, + 563, + 168 + ], + "type": "interline_equation", + "content": "\\mathcal {M} _ {t} = \\alpha \\left(\\mathbf {I} - \\eta_ {t} \\mathbf {k} _ {t} \\mathbf {k} _ {t} ^ {\\top}\\right) \\mathcal {M} _ {t - 1} + \\mathbf {v} _ {t} \\mathbf {k} _ {t} ^ {\\top}. \\tag {9}", + "image_path": "96fd0586f8ff19b8dc64739aa8cfed0612ce2586170d8796b768374b8ce866f9.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "spans": [ + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "content": "When (1) " + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "content": ", memory update is equivalent to DeltaNet (Schlag et al. 2021); and (2) " + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "inline_equation", + "content": "\\alpha_{t} \\in \\mathbb{R}^{m}" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "content": " are data-dependent learnable parameters, resulting sequence model is either Gated DeltaNet (Yang et al. 2024a) (" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "inline_equation", + "content": "m = 1" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "content": "), or RWKV7 (Peng et al. 
2025b) (" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "inline_equation", + "content": "m = d" + }, + { + "bbox": [ + 67, + 177, + 563, + 212 + ], + "type": "text", + "content": "). Therefore, RNNs with delta rule are special instances of MIRAS." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "content": "Beyond Delta Rule. As discussed earlier, while delta rule with its value replacement strategy is more powerful than Hebbian-like learning rules, it suffers from theoretical limitations (Irie et al. 2023) and achieves moderate performance in practice (Yang et al. 2024c). Therefore, several studies have focused on update rules beyond delta rule. Recently, Titans (Behrouz et al. 2024c) suggests using non-linear MSE objective of " + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{M}_t(\\mathbf{k}_t) - \\mathbf{v}_t\\| _2^2" + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "content": " with both local and global retention of " + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t = \\| W_t - W_{t - 1}\\| _F^2" + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_t = \\| W_t\\| _2^2" + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "content": " and optimize it with gradient descent with momentum " + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 67, + 223, + 564, + 308 + ], + "type": "text", + "content": ". 
Therefore, Titans-LMM is a special instance of MIRAs, where we use the abovementioned attentional bias and retention regularizations, and gradient descent with momentum as the optimizer." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "text", + "content": "Another example of such models is Mesa-layer, in which the model uses " + }, + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{t} \\|\\mathcal{M}_{t}(\\mathbf{k}_{i}) - \\mathbf{v}_{i}\\|_{2}^{2}" + }, + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "text", + "content": " as the attentional bias objective with " + }, + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "inline_equation", + "content": "\\|\\mathcal{M}_{t}\\|_{2}^{2}" + }, + { + "bbox": [ + 67, + 313, + 563, + 361 + ], + "type": "text", + "content": " as the retention regularization. Since these models use Newton's method to optimize such an objective, they provide a more expressive update rule than delta rule. We further discuss a set of new learning algorithms beyond delta rule in Section 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "spans": [ + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "text", + "content": "Attention. As discussed by Sun et al. 
(2024), softmax attention is a non-parametric solution of " + }, + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "text", + "content": "-MSE loss function (i.e., " + }, + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "inline_equation", + "content": "\\| W\\mathbf{k} - \\mathbf{v}\\| _2^2" + }, + { + "bbox": [ + 68, + 372, + 563, + 407 + ], + "type": "text", + "content": ") with Nadaraya-Watson estimator. Therefore, softmax attention is an instance of MIRAS, when we find the non-parametric solution to the MSE loss with Nadaraya-Watson estimator, without retention." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 426, + 459, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 426, + 459, + 441 + ], + "spans": [ + { + "bbox": [ + 68, + 426, + 459, + 441 + ], + "type": "text", + "content": "5 Beyond Existing Attentional Biases and Retention Gates" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "spans": [ + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": "As discussed in the previous section, existing work focuses only on linear/quadratic choices for the attentional bias or retention gate. 
In particular, the loss function " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " is defined as " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "L(\\mathcal{M}(\\mathbf{k}_t),\\mathbf{v}_t) = c_t\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\|^2" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " for some (learnable) constant " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "c_{t}" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " in prior work. Also the regularization term " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "R_{t}(W)" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " or the parametric " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "D_{t}" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " is considered as a quadratic/linear function. In addition, almost all prior work considers " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " to be the entire " + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 451, + 563, + 571 + ], + "type": "text", + "content": " space. However, in general there could be various choices for all the three aforementioned design choices. To illustrate the potential and flexibility of our designed framework, here, we review some of the potential design choices for attentional bias and retention gate in MirAS. 
For the sake of clarity, we discuss all these attentional bias and memory retention gates based on using gradient descent as the optimizer, and so based on the provided two view points. However, these attentional bias objectives and retention regularizers can be directly used in Equation 4 and optimized by using any other optimization algorithms, resulting in different update rules." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 585, + 263, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 585, + 263, + 597 + ], + "spans": [ + { + "bbox": [ + 69, + 585, + 263, + 597 + ], + "type": "text", + "content": "5.1 Alternative Attentional Biases" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "spans": [ + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": "Variant 1: " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": "-Attentional Bias. As discussed in the main body, attentional bias defines the \"similarity metric\" and measures how well memory can recall the value, given its corresponding key. Although " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": " regression loss often is a natural choice, it is sensitive to noise in the data. A natural extension is to use " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": "-norm class of objectives. 
That is, let " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": " be the memory, " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": " be the keys, and " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": " be the values, we define " + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 67, + 609, + 563, + 657 + ], + "type": "text", + "content": "-attentional bias as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 241, + 666, + 563, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 666, + 563, + 681 + ], + "spans": [ + { + "bbox": [ + 241, + 666, + 563, + 681 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\| \\mathcal {M} \\left(\\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {p} ^ {p}, \\tag {10}", + "image_path": "8a7042b492ca4691b3ec9e5526cdec1fc978947e64dbebf07fd6cb0322beef74.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 687, + 563, + 717 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 687, + 563, + 717 + ], + "spans": [ + { + "bbox": [ + 68, + 687, + 563, + 717 + ], + "type": "text", + "content": "The retention gate (forget gate) in Titans is different from Mamba2 and Gated DeltaNet that we discussed above. The main difference comes from the case of full memory erase. 
While Mamba2 gating removes the entire memory and treats the next token as the first ever seen data, Titans use a \"cold start\" strategy and use the previous state of the memory to measure the surprise of the incoming token before fully erasing the memory." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "p \\in \\mathbb{R}^{\\geq 1}" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "\\| . \\|_p" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": "-norm. 
Although depending on the distribution of the data, we might want to use different values of " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": " (see Section 6), different values of " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": " can result in memory architectures with interesting properties. For the sake of simplicity, let memory be a matrix, i.e., " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "W \\in \\mathbb{R}^{m \\times d}" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(W, \\mathbf{k}_t) = W\\mathbf{k}_t" + }, + { + "bbox": [ + 67, + 73, + 563, + 110 + ], + "type": "text", + "content": ", the closed form can be derived as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 148, + 118, + 563, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 118, + 563, + 133 + ], + "spans": [ + { + "bbox": [ + 148, + 118, + 563, + 133 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = W _ {t} - p \\eta_ {t} \\left(\\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. 
\\tag {11}", + "image_path": "8d6bffb8df93750e04bdb7ff0be4079a679cc12726ca66158bd46cc629edf8c8.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 141, + 238, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 141, + 238, + 153 + ], + "spans": [ + { + "bbox": [ + 69, + 141, + 238, + 153 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 69, + 141, + 238, + 153 + ], + "type": "inline_equation", + "content": "p = 1" + }, + { + "bbox": [ + 69, + 141, + 238, + 153 + ], + "type": "text", + "content": ", the recurrence is simplified as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 245, + 162, + 563, + 175 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 162, + 563, + 175 + ], + "spans": [ + { + "bbox": [ + 245, + 162, + 563, + 175 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t} - \\eta_ {t} \\operatorname {S i g n} \\left(W _ {t} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top}, \\tag {12}", + "image_path": "89af85ce7350411e6bbde1033f142e62f9faaf9dc2338bafb7405bd5b821d475.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "content": "which means that the memory has only two values of " + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "inline_equation", + "content": "1" + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "content": ". 
We call this variation value-less associative memory, in which we store entities (keys) but map them into two extreme class of " + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "inline_equation", + "content": "+1" + }, + { + "bbox": [ + 67, + 185, + 562, + 209 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "spans": [ + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "content": "Remark 5. One of the critical challenges to use the above update rule is in the backpropagation process, in which " + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "inline_equation", + "content": "\\operatorname{Sign}(\\cdot)" + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "inline_equation", + "content": "|\\cdot|" + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "content": " are non-differentiable and so might cause unstable training. To overcome this issue, we use " + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "inline_equation", + "content": "\\operatorname{Sign}(x) \\approx \\tanh(\\alpha x)" + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "inline_equation", + "content": "|x| = \\sqrt{x^2 + \\epsilon}" + }, + { + "bbox": [ + 68, + 215, + 563, + 252 + ], + "type": "text", + "content": ", as the smooth approximators of these functions." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 258, + 563, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 258, + 563, + 293 + ], + "spans": [ + { + "bbox": [ + 68, + 258, + 563, + 293 + ], + "type": "text", + "content": "One simple interpretation for such behavior (i.e., value-less memory) is similar to the coping mechanism in humans (Loftus 1993), in which the memory does not store the values for extreme events. This interpretation of protective memory in extreme events motivates our next variant." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "text", + "content": "Variant 2: Huber Loss: Memory with Coping Mechanism. While " + }, + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "text", + "content": "-norm objective is a common choice for many statistical and machine learning tasks, it is known to be sensitive to outliers and extreme samples. This sensitivity extends to the use of " + }, + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 304, + 563, + 365 + ], + "type": "text", + "content": " loss for attentional bias. To address this and drawing motivation from robust regression literature, we suggest utilizing the Huber loss-type (Hastie et al. 2009; Huber 1992) as the attentional bias, thereby reducing the negative impact of the outlier data on the memory learning process." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 370, + 562, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 370, + 562, + 394 + ], + "spans": [ + { + "bbox": [ + 67, + 370, + 562, + 394 + ], + "type": "text", + "content": "We can apply Huber-type loss in three different ways: The first approach is to define the summation of the Huber loss across different coordinates as the total loss, i.e.," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 232, + 400, + 399, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 400, + 399, + 427 + ], + "spans": [ + { + "bbox": [ + 232, + 400, + 399, + 427 + ], + "type": "interline_equation", + "content": "\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\sum_ {j} \\mathcal {H} (\\mathcal {M} (W, \\mathbf {k} _ {t}) _ {j} - \\mathbf {v} _ {t, j}),", + "image_path": "f01b51a49753ba3c681cb7abc4b98a1db893901f08685228a232863a3845858e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(W,\\mathbf{k}_t)_j" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{t,j}" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": "-th coordinate of " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": 
"\\mathcal{M}(W,\\mathbf{k}_t)" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_t" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": " respectively. The function " + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\cdot):\\mathbb{R}\\mapsto \\mathbb{R}" + }, + { + "bbox": [ + 67, + 436, + 563, + 458 + ], + "type": "text", + "content": " is the Huber loss defined as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 240, + 457, + 563, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 457, + 563, + 483 + ], + "spans": [ + { + "bbox": [ + 240, + 457, + 563, + 483 + ], + "type": "interline_equation", + "content": "\\mathcal {H} (a) = \\left\\{ \\begin{array}{l l} \\frac {1}{2} a ^ {2} & \\text {i f} | a | \\leq \\delta \\\\ \\delta \\left(| a | - \\frac {1}{2} \\delta\\right) & \\text {i f} | a | > \\delta . \\end{array} \\right. \\tag {13}", + "image_path": "4670138483116ccc15979168b92a20ec83b7454bc05327f30119009a0ca9e0c6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 487, + 563, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 487, + 563, + 512 + ], + "spans": [ + { + "bbox": [ + 69, + 487, + 563, + 512 + ], + "type": "text", + "content": "Utilizing this attentional bias can lead to various memory update rules. 
For example, for the matrix form memory " + }, + { + "bbox": [ + 69, + 487, + 563, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t" + }, + { + "bbox": [ + 69, + 487, + 563, + 512 + ], + "type": "text", + "content": ", the update rule is given by" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 519, + 563, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 519, + 563, + 540 + ], + "spans": [ + { + "bbox": [ + 82, + 519, + 563, + 540 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} - \\eta_ {t} \\left[ \\left(\\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| \\leq \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) + \\left(\\delta_ {t} \\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} ^ {\\top}\\right) \\odot \\left(\\mathbf {I} \\left(\\left| W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| > \\delta_ {t}\\right) \\mathbf {1} ^ {\\top}\\right) \\right] \\tag {14}", + "image_path": "aedc967d779c770fa16e708083c25b3ec1849465db97daf9f15117ce24ee11ec.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "type": "text", + "content": "In this formulation, the parameter " + }, + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "type": "text", + "content": " decides the type of the memory used for each block of memory (" + }, + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 548, + 563, + 572 + ], + 
"type": "text", + "content": "-norm objective or value-less) based on the context, making the memory more robust to outliers." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 578, + 471, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 578, + 471, + 590 + ], + "spans": [ + { + "bbox": [ + 68, + 578, + 471, + 590 + ], + "type": "text", + "content": "The second approach is to define the Huber-type loss based on the " + }, + { + "bbox": [ + 68, + 578, + 471, + 590 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 578, + 471, + 590 + ], + "type": "text", + "content": " loss over all coordinates, i.e.," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 237, + 600, + 394, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 600, + 394, + 613 + ], + "spans": [ + { + "bbox": [ + 237, + 600, + 394, + 613 + ], + "type": "interline_equation", + "content": "\\ell (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\mathcal {H} (\\| \\mathcal {M} (W, \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}).", + "image_path": "b974fbd3411b1dfe18a168b2543d52e5228e8e6e8b1b76ee1ccda75f87e0e0e1.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 622, + 563, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 622, + 563, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 622, + 563, + 647 + ], + "type": "text", + "content": "For simplicity of derivations, assume matrix memory " + }, + { + "bbox": [ + 67, + 622, + 563, + 647 + ], + "type": "inline_equation", + "content": "M(W,\\mathbf{k}_t) = W\\mathbf{k}_t" + }, + { + "bbox": [ + 67, + 622, + 563, + 647 + ], + "type": "text", + "content": ". 
Then using gradient descent for updating memory leads the memory update rule" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 165, + 654, + 563, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 654, + 563, + 687 + ], + "spans": [ + { + "bbox": [ + 165, + 654, + 563, + 687 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} - \\eta_ {t} \\left\\{ \\begin{array}{l l} \\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {T} & \\text {i f} \\| \\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} \\leq \\delta_ {t}, \\\\ \\delta_ {t} \\frac {\\left(\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right)}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {T} & \\text {O t h e r w i s e .} \\end{array} \\right. \\tag {15}", + "image_path": "86e42072f55074973e119525ac0b4d70357b0c82a9dfab639385386274955ec6.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "text", + "content": "Again, in the form (15), the parameter " + }, + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "text", + "content": " decides the type of the memory used (" + }, + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 695, + 563, + 719 + ], + "type": "text", + "content": "-norm objective or normalized version) based on the context, making the memory more robust to outliers." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "spans": [ + { + "bbox": [ + 313, + 742, + 319, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "text", + "content": "Finally, in the third approach, we present a smooth mixture method, in which the memory decides if for an incoming data it is better to use " + }, + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 68, + 73, + 563, + 97 + ], + "type": "text", + "content": " attentional bias:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 187, + 103, + 563, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 103, + 563, + 135 + ], + "spans": [ + { + "bbox": [ + 187, + 103, + 563, + 135 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. 
\\tag {16}", + "image_path": "6a5d6a5348855d190cec18bdb8b3711243d3a588e3e14914577dcdd59c7cd910.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 141, + 258, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 141, + 258, + 153 + ], + "spans": [ + { + "bbox": [ + 69, + 141, + 258, + 153 + ], + "type": "text", + "content": "The role of parameter " + }, + { + "bbox": [ + 69, + 141, + 258, + 153 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 69, + 141, + 258, + 153 + ], + "type": "text", + "content": " is the same as above." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 163, + 563, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 163, + 563, + 212 + ], + "spans": [ + { + "bbox": [ + 68, + 163, + 563, + 212 + ], + "type": "text", + "content": "Variant 3: Memory Robust to Value Shifts. Following the robustness requirement discussed in the previous section, we aim to design a memory mechanism that exhibits resilience against small shifts in the value parameter. A natural approach in this context is to employ a robust optimization formulation. 
Specifically, we define the loss function as the worst-case " + }, + { + "bbox": [ + 68, + 163, + 563, + 212 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 163, + 563, + 212 + ], + "type": "text", + "content": " distance between the predicted memory output and the perturbed true value:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 196, + 218, + 563, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 218, + 563, + 243 + ], + "spans": [ + { + "bbox": [ + 196, + 218, + 563, + 243 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\max _ {\\| \\delta \\mathbf {v} _ {t} \\| _ {2} \\leq \\Delta} \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\left(\\mathbf {v} _ {t} + \\boldsymbol {\\delta} \\mathbf {v} _ {t}\\right) \\| _ {2} ^ {2}. \\tag {17}", + "image_path": "5fcaaa66b990bc9ef3c4f4321be98b3555b40eb9a671ef6a62104bad2a17b709.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "spans": [ + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": "This formulation seeks the memory parameters " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": " that perform well even under the adverse local perturbation of the true value " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_t" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": " within an " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + 
"type": "text", + "content": " ball of radius " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": ". To solve the maximization problem in (17), we find the optimal perturbation " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "\\delta \\mathbf{v}_t^*" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": ". By solving this problem with respect to " + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "inline_equation", + "content": "\\delta \\mathbf{v}_t" + }, + { + "bbox": [ + 68, + 248, + 563, + 284 + ], + "type": "text", + "content": ", we arrive at:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 258, + 289, + 372, + 316 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 289, + 372, + 316 + ], + "spans": [ + { + "bbox": [ + 258, + 289, + 372, + 316 + ], + "type": "interline_equation", + "content": "\\delta \\mathbf {v} _ {t} ^ {*} = \\Delta \\frac {- \\mathcal {M} (W , \\mathbf {k} _ {t}) + \\mathbf {v} _ {t}}{\\| \\mathcal {M} (W , \\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| _ {2}}", + "image_path": "deab8e4e67fb2160fcfcd6ec9a71fbf1da090b323fc2cfc874bb3bd89c14d04e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 321, + 463, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 321, + 463, + 333 + ], + "spans": [ + { + "bbox": [ + 68, + 321, + 463, + 333 + ], + "type": "text", + "content": "Substituting this optimal perturbation back into the loss function (17), we obtain the robust loss:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 171, + 339, + 459, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 339, + 459, + 361 + ], + "spans": [ + { + "bbox": [ + 171, + 339, + 459, + 361 + ], + "type": 
"interline_equation", + "content": "\\mathcal {L} \\left(\\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right); \\mathbf {v} _ {t}\\right) = \\frac {1}{2} \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} ^ {2} + \\Delta \\| \\mathcal {M} \\left(W, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2} + \\frac {1}{2} \\Delta^ {2}.", + "image_path": "e0124b6e871a145ca549691849d789cc253f7b1b9707c8ee69c9ce879be46b66.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "spans": [ + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "content": "This robust loss function is a combination of the standard " + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "content": " loss and a term proportional to the " + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "content": " norm of the error, scaled by the robustness parameter " + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "content": ". The value of " + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 68, + 367, + 563, + 403 + ], + "type": "text", + "content": " thus controls the trade-off between fitting the nominal data and ensuring robustness against value perturbations." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "content": "For simplicity of the derivations, let us consider a constant value for " + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "content": ", an Euclidean retention gate " + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(W,W_{t - 1}) = \\| W - W_{t - 1}\\|^2" + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "content": ", and an attentional bias term " + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle" + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "content": ". Furthermore, to simplify the memory operation, we assume a linear matrix memory model " + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(W,\\mathbf{k}_t) = W\\mathbf{k}_t" + }, + { + "bbox": [ + 69, + 409, + 563, + 456 + ], + "type": "text", + "content": ". 
Under these assumptions, we can derive the memory update mechanism using gradient descent on the robust loss:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 176, + 471, + 453, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 471, + 453, + 498 + ], + "spans": [ + { + "bbox": [ + 176, + 471, + 453, + 498 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} - \\eta \\left(\\left(\\mathcal {M} \\left(W _ {t - 1}, \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} + \\Delta \\frac {\\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t}}{\\| \\mathcal {M} \\left(W _ {t - 1} , \\mathbf {k} _ {t}\\right) - \\mathbf {v} _ {t} \\| _ {2}} \\mathbf {k} _ {t} ^ {\\top}\\right)", + "image_path": "0a828922658b45ce64a9d60d455642004a9b66661dae2bda4205d140b5a9eae8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 500, + 562, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 500, + 562, + 525 + ], + "spans": [ + { + "bbox": [ + 68, + 500, + 562, + 525 + ], + "type": "text", + "content": "In this update rule, the parameter " + }, + { + "bbox": [ + 68, + 500, + 562, + 525 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 68, + 500, + 562, + 525 + ], + "type": "text", + "content": ", which governs the influence of the robustness term, can also be treated as a learnable parameter, allowing the model to adapt its robustness based on the observed data." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 538, + 250, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 538, + 250, + 551 + ], + "spans": [ + { + "bbox": [ + 69, + 538, + 250, + 551 + ], + "type": "text", + "content": "5.2 Alternative Retention Gates" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "spans": [ + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "text", + "content": "Variant 1: Memorization Over A Scaled Probability Simplex Via " + }, + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "text", + "content": "-Divergence. A common technique in learning to prevent numerical instabilities and exploding values is to restrict the search space to a bounded domain. Following this principle, to avoid numerical instabilities, we can constrain the variable " + }, + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "inline_equation", + "content": "W_{t}" + }, + { + "bbox": [ + 68, + 563, + 563, + 609 + ], + "type": "text", + "content": " to lie within a (scaled) probability simplex. 
In other words, we can restrict the state to lie in the constraint set" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 228, + 617, + 402, + 631 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 617, + 402, + 631 + ], + "spans": [ + { + "bbox": [ + 228, + 617, + 402, + 631 + ], + "type": "interline_equation", + "content": "\\mathcal {W} = \\{W \\mid \\| W \\| _ {1} = c \\text { and } W _ {j l} \\geq 0, \\forall j, l \\}.", + "image_path": "6f6138ca1a100c7c01d4c5be0374fee01a58673f9ddfede85397b3d258447ee1.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "spans": [ + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": "In this set, each matrix " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " can be viewed as a measure. Thus, in (Learning-Retaining Viewpoint), we can utilize divergences over measures to define our premetric. For example, we can use " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": "-divergence measure (Polyanskiy et al. 2025, Def 4.9), (Csiszar 1967) to define " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t(\\cdot, \\cdot)" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": ". 
More specifically, let " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " be a smooth strictly convex function from " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^+" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "\\mathbb{R}" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "f(1) = 0" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": ". Then, we can define the " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": "-divergence between " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "inline_equation", + "content": "W'" + }, + { + "bbox": [ + 68, + 638, + 564, + 686 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 251, + 692, + 378, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 692, + 378, + 724 + ], + "spans": [ + { + "bbox": [ + 251, + 692, + 378, + 724 + ], + "type": "interline_equation", + "content": "\\mathrm {D} _ {t} (W, W ^ {\\prime}) = \\sum_ {j l} W _ {j l} ^ {\\prime} f \\left(\\frac {W _ {j l}}{W _ {j l} ^ {\\prime}}\\right).", + "image_path": "96cd3ce06fdc6a0b1aafc10c2092653a94c089dba9bd731877fe06a975cb8caa.jpg" + } + ] + } + ], + 
"index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "text", + "content": "It is known that " + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "text", + "content": "-divergence is zero if and only if " + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "inline_equation", + "content": "W = W'" + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "text", + "content": "; see Polyanskiy et al. 2025, Theorem 2.3. Using the above premetric as the retention gate and setting " + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle" + }, + { + "bbox": [ + 67, + 73, + 563, + 108 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint), we get the update rule" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 229, + 110, + 563, + 121 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 110, + 563, + 121 + ], + "spans": [ + { + "bbox": [ + 229, + 110, + 563, + 121 + ], + "type": "interline_equation", + "content": "W _ {t} = W _ {t - 1} \\odot g \\left(- \\zeta_ {t} - \\eta_ {t} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right). 
\\tag {18}", + "image_path": "a0d90278e9ce354c647573ee69cc6ddc7ab30921351794fa19c10a84c33ff0aa.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " is the inverse of the mapping " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "f'" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g(f'(\\tau)) = \\tau" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "\\forall \\tau" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": "; the operator " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " denotes the Hadamard (elementwise) product, and " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "\\zeta_t" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " should be chosen such that " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "\\| W_t\\|_1 = c" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ". 
Notice that since the function " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " is strictly convex and smooth, its derivative is strictly increasing and hence " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " is well defined. Conversely, for any strictly monotone function " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ", we can find its inverse function " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g^{-1}" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " (which is strictly increasing) and define " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "f(\\tau) = \\mathrm{const} + \\int_{\\tau' = 0}^{\\tau}g^{-1}(\\tau')d\\tau'" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ". The term const should be chosen such that " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "f(1) = 0" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": ". Then the update rule in (18) can be interpreted by the " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": "-divergence regularization, as explained above. 
Therefore, one can directly choose a continuous monotonically increasing function " + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 67, + 127, + 563, + 211 + ], + "type": "text", + "content": " and use (18) for memory update." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "spans": [ + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": "Specializing to KL divergence. Let us further make the above update rule explicit by using special function " + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": ". If we choose " + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "inline_equation", + "content": "f(\\tau) = \\tau \\ln(\\tau)" + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": ", then the " + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": "-divergence becomes the widely used KL divergence measure " + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "inline_equation", + "content": "D_t(W, W_{t-1}) = \\sum_{jl} W_{jl} \\log \\left( \\frac{W_{jl}}{(W_{t-1})_{jl}} \\right)" + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": ". In addition, we can also utilize the Shannon entropy as the global retention by regularizing deviations from uniform distribution, i.e., " + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "inline_equation", + "content": "G_t(W) = \\sum_{jl} W_{jl} \\log (W_{jl})" + }, + { + "bbox": [ + 67, + 222, + 563, + 287 + ], + "type": "text", + "content": ". 
Combining these choices of the local and global retention gates, we obtain the overall retention gate" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 187, + 287, + 443, + 317 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 287, + 443, + 317 + ], + "spans": [ + { + "bbox": [ + 187, + 287, + 443, + 317 + ], + "type": "interline_equation", + "content": "\\operatorname {R e t} _ {t} (W, W _ {t - 1}) = \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t - 1}\\right) _ {j l}}\\right) + \\frac {1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right)", + "image_path": "41e18f1774e205798aee024b6b97b0a05d32f79834cff545582d51fa15d3a06b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 323, + 562, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 323, + 562, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 323, + 562, + 348 + ], + "type": "text", + "content": "Choosing the attentional bias " + }, + { + "bbox": [ + 67, + 323, + 562, + 348 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell}(W; \\mathbf{k}_t, \\mathbf{v}_t) = \\langle W - W_{t-1}, \\nabla \\ell(W_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t) \\rangle" + }, + { + "bbox": [ + 67, + 323, + 562, + 348 + ], + "type": "text", + "content": " and the above retention gate will lead to the update rule" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 356, + 563, + 386 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 356, + 563, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 356, + 563, + 386 + ], + "type": "interline_equation", + "content": "W _ {t} = \\arg \\min _ {W} \\left\\langle W - W _ {t - 1}, \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) \\right\\rangle + \\frac {1}{\\eta_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(\\frac {W _ {j l}}{\\left(W _ {t - 1}\\right) _ {j l}}\\right) + \\frac 
{1}{\\alpha_ {t}} \\sum_ {j l} W _ {j l} \\log \\left(W _ {j l}\\right) \\tag {19}", + "image_path": "638fa6e5739015c4bb34eb4d0121982ad373fbdc5354756583417349ef715446.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 388, + 563, + 415 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 388, + 563, + 415 + ], + "spans": [ + { + "bbox": [ + 167, + 388, + 563, + 415 + ], + "type": "interline_equation", + "content": "\\text {s . t .} \\quad \\sum_ {j l} W _ {j l} = c, W _ {j l} \\geq 0, \\forall j l \\tag {20}", + "image_path": "eedbb2a68d612ba06fdfdba58b8ba19dcd6af7d8c89ad2b80e6e9ffecb747f21.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 424, + 416, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 424, + 416, + 436 + ], + "spans": [ + { + "bbox": [ + 69, + 424, + 416, + 436 + ], + "type": "text", + "content": "Attaching the Lagrange multiplier to the first constraint, the KKT conditions imply" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 151, + 445, + 481, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 445, + 481, + 472 + ], + "spans": [ + { + "bbox": [ + 151, + 445, + 481, + 472 + ], + "type": "interline_equation", + "content": "\\left(\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) _ {j l} + \\left(\\frac {1}{\\eta_ {t}} + \\frac {1}{\\alpha_ {t}}\\right) \\left(1 + \\log W _ {j l}\\right) - \\frac {1}{\\eta_ {t}} \\log \\left(\\left(W _ {t - 1}\\right) _ {j l}\\right) + \\mu_ {t} = 0, \\quad \\forall j, l", + "image_path": "1b80b78fc17746439f6cf250cf3c3c38705c501929e1e69c47d3373b43897121.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "content": "where " + }, + { + 
"bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "inline_equation", + "content": "\\mu_t" + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "content": " should be chosen such that " + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "inline_equation", + "content": "\\sum_{jl} W_{jl} = c" + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "content": ". Rearranging the terms and defining " + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "inline_equation", + "content": "\\lambda_t = \\frac{1 / \\alpha_t}{1 / \\alpha_t + 1 / \\eta_t}" + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "inline_equation", + "content": "\\eta_t' = \\frac{1}{1 / \\alpha_t + 1 / \\eta_t}" + }, + { + "bbox": [ + 67, + 481, + 562, + 506 + ], + "type": "text", + "content": ", we get the update rule" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 197, + 507, + 563, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 507, + 563, + 520 + ], + "spans": [ + { + "bbox": [ + 197, + 507, + 563, + 520 + ], + "type": "interline_equation", + "content": "W _ {t} \\leftarrow c \\operatorname {S o f t m a x} \\left(\\left(1 - \\lambda_ {t}\\right) \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} ^ {\\prime} \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {21}", + "image_path": "3ca7a2124d41e7f26c2de601d6a3cc4a9a0585a05b2b7776efb58ef94c514354.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "spans": [ + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "inline_equation", + "content": "\\lambda_t \\in (0,1)" + }, + { + 
"bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "inline_equation", + "content": "\\eta' \\in \\mathbb{R}^+" + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "text", + "content": " are the parameters that can be learned during training. The Softmax operator ensures that the output lies in the set " + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 67, + 525, + 563, + 549 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "text", + "content": "Notice that while all above calculations are done for a matrix " + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "text", + "content": ", similar update rule holds for other forms of parameters such as when " + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "text", + "content": " is a neural network (or when the parameter " + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 555, + 562, + 579 + ], + "type": "text", + "content": " is normalized per slice)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 590, + 563, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 563, + 650 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 563, + 650 + ], + "type": "text", + "content": "Variant 2: Elastic Net Regularization: Hard and Soft Forgetting. 
Elastic net is a powerful and popular tool in regression analysis to balance the feature selection capabilities of LASSO (Tibshirani 1996) and bias reduction properties of Ridge regression (Hilt et al. 1977; Hoerl et al. 1970). It has been widely used in different applications due to its ability to handle high-dimensional data and mitigate the effects of multicollinearity. Given this success, a natural question is what happens if we use this regularization scheme in our context." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "spans": [ + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "text", + "content": "Let us start based on (Learning-Retaining Viewpoint) to design our memorization scheme. In (Learning-Retaining Viewpoint), we discussed that the loss function " + }, + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell_t} (W;\\mathbf{k}_t,\\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "text", + "content": " is an approximation of the original function " + }, + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "inline_equation", + "content": "\\ell (\\cdot)" + }, + { + "bbox": [ + 67, + 656, + 570, + 692 + ], + "type": "text", + "content": ", measuring our goodness-of-fit. 
Regularizing this loss with elastic net regularizer, we obtain the approximation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 223, + 700, + 407, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 700, + 407, + 714 + ], + "spans": [ + { + "bbox": [ + 223, + 700, + 407, + 714 + ], + "type": "interline_equation", + "content": "\\widetilde {\\ell} _ {t} (W; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) = \\langle W - W _ {t - 1}, \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\rangle .", + "image_path": "20c5125fed10048404b024a11c718e3f1e0d414743c2fa9dde5ee7dac4e734e0.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "text", + "content": "with a global retention of " + }, + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_t(W) = \\frac{1}{2\\beta} \\| W\\| _2^2 +\\frac{1}{\\alpha}\\| W\\| _1" + }, + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "text", + "content": ". To fully specify the update rule of (Learning-Retaining Viewpoint), we also need to specify the premetric functions " + }, + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 67, + 72, + 563, + 108 + ], + "type": "text", + "content": ". 
For the sake of keeping the update rule simple (and parallelizable), we can choose" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 250, + 107, + 380, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 107, + 380, + 129 + ], + "spans": [ + { + "bbox": [ + 250, + 107, + 380, + 129 + ], + "type": "interline_equation", + "content": "\\mathrm {D} _ {t} (W, W _ {t - 1}) = \\frac {1}{2} \\| W - W _ {t - 1} \\| _ {2} ^ {2}.", + "image_path": "998a3dee3d52105145aa5ef25e50506e70a19e6a6ab4092054d174e0f9ddea34.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 133, + 438, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 133, + 438, + 145 + ], + "spans": [ + { + "bbox": [ + 69, + 133, + 438, + 145 + ], + "type": "text", + "content": "These choices of the attentional bias and retention gate leads to the following update rule:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 238, + 163, + 563, + 175 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 163, + 563, + 175 + ], + "spans": [ + { + "bbox": [ + 238, + 163, + 563, + 175 + ], + "type": "interline_equation", + "content": "W _ {t} = \\mathcal {S} _ {\\gamma} \\left(\\lambda W _ {t - 1} - \\zeta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right), \\tag {22}", + "image_path": "8c7d5fcd845dc003fc2e069ac8ba0ebf252f221a003508ea6e48a2f885f5075c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "spans": [ + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "text", + "content": "where " + }, + { + 
"bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "inline_equation", + "content": "\\lambda = \\frac{\\beta}{\\beta + \\eta}" + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "inline_equation", + "content": "\\zeta = \\eta\\lambda" + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "inline_equation", + "content": "S_{\\gamma}" + }, + { + "bbox": [ + 67, + 180, + 562, + 207 + ], + "type": "text", + "content": " is the soft thresholding operator, applied element-wise. For each element, this operator is defined as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 247, + 208, + 383, + 221 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 208, + 383, + 221 + ], + "spans": [ + { + "bbox": [ + 247, + 208, + 383, + 221 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {\\gamma} (z) = \\operatorname {s i g n} (z) \\max \\left\\{0, | z | - \\gamma \\right\\}.", + "image_path": "524c1b4283649a6bdf72c52e3e89b019b5582b3cd8a6e613ade1c2d8d84d62be.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "spans": [ + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": "In other words, for large values of " + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "inline_equation", + "content": "S_{\\gamma}(z)" + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": " makes " + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "inline_equation", + 
"content": "z" + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": " closer to zero by " + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": " amount. If it is already in the " + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 67, + 225, + 563, + 251 + ], + "type": "text", + "content": "-vicinity of zero, then it makes it zero (hard forget)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "spans": [ + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": "Equation (22) can be viewed as a combination of soft forgetting (obtained by multiplying " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "\\lambda \\in (0,1)" + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": ", and a hard forgetting (if it is smaller than " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": "). 
The hyperparameters " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "\\gamma, \\lambda," + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": " can be learned. Notice that since the shrinkage operator is not differentiable, we can approximate it with its smooth approximation. For example, we can use " + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "inline_equation", + "content": "S_{\\gamma}(z) \\approx \\frac{|z|*\\arctan(z / \\gamma)}{\\pi / 2}" + }, + { + "bbox": [ + 69, + 255, + 563, + 309 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 318, + 563, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 318, + 563, + 342 + ], + "spans": [ + { + "bbox": [ + 67, + 318, + 563, + 342 + ], + "type": "text", + "content": "Variant 3: Elastic Net Regularization: Forgetting via Soft-thresholding. The elastic net regularizer can also be used in the (FTRL Viewpoint). 
In particular, in (FTRL Viewpoint), we can set" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 253, + 350, + 377, + 375 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 350, + 377, + 375 + ], + "spans": [ + { + "bbox": [ + 253, + 350, + 377, + 375 + ], + "type": "interline_equation", + "content": "\\frac {1}{\\eta_ {t}} R _ {t} (W) = \\frac {1}{\\eta} \\| W \\| ^ {2} + \\frac {1}{\\alpha} \\| W \\| _ {1}", + "image_path": "75fe40f1a6a66feab69947ae112d64c78184fb16a456f9a46b288bf8efc32098.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "text", + "content": "and use " + }, + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "inline_equation", + "content": "\\widehat{\\ell}(W; x_i) = \\langle W - W_{i-1}, \\nabla \\ell(W_{i-1}; x_i) \\rangle" + }, + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "text", + "content": ". 
Assuming initialization at " + }, + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "inline_equation", + "content": "W_0 = 0" + }, + { + "bbox": [ + 67, + 384, + 563, + 409 + ], + "type": "text", + "content": ", these choices of attentional bias and retention gate leads to the update rules:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 253, + 418, + 376, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 418, + 376, + 430 + ], + "spans": [ + { + "bbox": [ + 253, + 418, + 376, + 430 + ], + "type": "interline_equation", + "content": "A _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)", + "image_path": "80a9a514a1cab65985fa34f572dcd4f24955f7607eb94f8a68c8f341579616f3.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 254, + 433, + 561, + 447 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 433, + 561, + 447 + ], + "spans": [ + { + "bbox": [ + 254, + 433, + 561, + 447 + ], + "type": "interline_equation", + "content": "W _ {t} = \\mathcal {S} _ {\\eta / \\alpha} (A _ {t}) \\tag {23}", + "image_path": "82af14f2e0aaae35d69d79239be6a195272495eb0caac98230aecab4efe861aa.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "spans": [ + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "inline_equation", + "content": "S_{\\eta /\\alpha}(\\cdot)" + }, + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "text", + "content": " is the soft-thresholding operator with parameter " + }, + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "inline_equation", + "content": "\\eta /\\alpha" + }, + { + "bbox": [ + 68, + 455, + 533, + 468 + ], + "type": "text", + "content": " , which can be 
smoothly as explained in Variant 1.1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": "Variant 4: General " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "L_{q}" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": " Memory Stability. Existing work is based on the retention gate choices " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_t(W, W_{t-1}) = \\|W - W_{t-1}\\|_F^2" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "R(W) = \\|W\\|_2^2" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": ". However, one can choose other choices of retention gate. For example, in (FTRL Viewpoint), we can choose " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "L_{q}" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": " norm as the regularizer " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "R(W)" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": ". 
More specifically, for " + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "inline_equation", + "content": "1 < q \\leq 2" + }, + { + "bbox": [ + 67, + 477, + 562, + 515 + ], + "type": "text", + "content": ", we can set" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 258, + 522, + 372, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 522, + 372, + 548 + ], + "spans": [ + { + "bbox": [ + 258, + 522, + 372, + 548 + ], + "type": "interline_equation", + "content": "\\frac {1}{\\eta_ {t}} R (W) = \\frac {1}{2 \\eta (q - 1)} \\| W \\| _ {q} ^ {2}.", + "image_path": "1d6a7b87a84a675d8fbf5949407933394dd73ec2c00a9ea617d43aaca6ccaa12.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": "Using this retention gate and choosing " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "\\widehat{\\ell_i} (W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{i - 1},\\nabla \\ell (W_{i - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": " in (FTRL Viewpoint), leads to the update rule " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "W_{t} = -\\eta \\frac{A_{t}}{\\|A_{t}\\|_{p}^{p - 2}}" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "p = \\frac{q}{q - 1}" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "A_{t} = \\sum_{i = 1}^{t}\\nabla \\ell (W_{i - 
1};\\mathbf{k}_{t},\\mathbf{v}_{t})" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": "; see Shalev-Shwartz et al. 2012, Section 2.6. Here, " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": " denotes the Hadamard (element-wise) product and " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "|\\cdot |" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": " is the element-wise absolute value operator. Assuming " + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "inline_equation", + "content": "W_0 = 0" + }, + { + "bbox": [ + 67, + 556, + 563, + 611 + ], + "type": "text", + "content": ", this update rule can be recursively written as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 203, + 618, + 427, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 618, + 427, + 647 + ], + "spans": [ + { + "bbox": [ + 203, + 618, + 427, + 647 + ], + "type": "interline_equation", + "content": "A _ {t} = A _ {t - 1} - \\eta \\nabla \\ell \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {p} ^ {p - 2}}.", + "image_path": "e1fa15a95e49443ca33904e34dd76f9eeae7b899d5a0d58b39de8ce306998dbd.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "text", + "content": "Variant 5: Bregman Divergence as Retention Gate.. Another natural choice is to use Bregman divergence as retention gate, leading to a mirror descent-type algorithms. 
In particular, given a smooth strictly convex function " + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "inline_equation", + "content": "f(\\cdot): \\mathbb{R} \\mapsto \\mathbb{R}" + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "text", + "content": ", we can define the function " + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "inline_equation", + "content": "F(W) = \\sum_{jl} f(W_{jl})" + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "text", + "content": ". Based on this choice of function " + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 666, + 563, + 702 + ], + "type": "text", + "content": ", we define the Bregman divergence" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 221, + 711, + 408, + 723 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 711, + 408, + 723 + ], + "spans": [ + { + "bbox": [ + 221, + 711, + 408, + 723 + ], + "type": "interline_equation", + "content": "D _ {t} (W, W ^ {\\prime}) = F (W) - F \\left(W ^ {\\prime}\\right) - \\langle W ^ {\\prime}, W - W ^ {\\prime} \\rangle", + "image_path": "b37c3f16fd8638dc8e885a6a7c29c0d3b1ed3f936a851fcf456b6c53d4ea685b.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 563, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 563, + 97 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 563, + 97 + ], + "type": "text", + "content": "as our parametric function. 
Utilizing this retention gate and choosing " + }, + { + "bbox": [ + 67, + 72, + 563, + 97 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell}_t(W;\\mathbf{k}_t,\\mathbf{v}_t) = \\langle W - W_{t - 1},\\nabla \\ell (W_{t - 1};\\mathbf{k}_t,\\mathbf{v}_t)\\rangle" + }, + { + "bbox": [ + 67, + 72, + 563, + 97 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint), we obtain the update rule" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 232, + 105, + 399, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 105, + 399, + 118 + ], + "spans": [ + { + "bbox": [ + 232, + 105, + 399, + 118 + ], + "type": "interline_equation", + "content": "W _ {t} = g \\left(- \\eta \\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) + F ^ {\\prime} \\left(W _ {t - 1}\\right)\\right).", + "image_path": "b42089c29f66a2bb51edc81510e39c88cec6e022f3a409e89770c123329cfb28.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "spans": [ + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "F'" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": " is the mapping obtained by applying " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "f'(\\cdot)" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": " (the derivative of " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": ") element-wise to all entries of its input matrix argument. 
The function " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": " is the inverse of the mapping " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "F'(\\cdot)" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "inline_equation", + "content": "g(F'(W)) = W" + }, + { + "bbox": [ + 67, + 125, + 563, + 150 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": "If we choose " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f(\\tau) = \\frac{\\tau^2}{2}" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "F'(W)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " becomes the identity mapping and so is " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ". Therefore, the above update becomes simple gradient descent with no nonlinearity involved in the update rule. 
However, other choices of " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " introduces additional nonlinearity in " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ", which can enhance the expressivity of our memory. For example, we can choose the function " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " so that its derivative becomes the inverse sigmoid function, i.e., " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f'(\\tau) = \\ln \\left( \\frac{\\tau}{1 - \\tau} \\right)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f': (0,1) \\mapsto \\mathbb{R}" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f'(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " is strictly increasing, then the function " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " (and hence " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "F(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ") is strictly convex. Therefore, the Bregman divergence is well defined. 
Moreover, the inverse of the function " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "f'(\\cdot)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " becomes the sigmoid function, i.e., " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "g(\\tau) = \\sigma(\\tau) = \\frac{\\exp(\\tau)}{1 + \\exp(\\tau)}" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "inline_equation", + "content": "g: \\mathbb{R} \\mapsto (0,1)" + }, + { + "bbox": [ + 67, + 156, + 564, + 243 + ], + "type": "text", + "content": ". Then, the update of the memory becomes" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 228, + 251, + 403, + 278 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 251, + 403, + 278 + ], + "spans": [ + { + "bbox": [ + 228, + 251, + 403, + 278 + ], + "type": "interline_equation", + "content": "W _ {t} = \\sigma \\left(\\ln \\left(\\frac {W _ {t}}{1 - W _ {t}}\\right) - \\eta \\nabla \\ell (W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t})\\right),", + "image_path": "9b1352fc7030eb3363cc479fcf394be2f1162bce2d62c322474217700ed85aa1.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "content": " is the sigmoid function operated element-wise on the entries of " + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + 
], + "type": "text", + "content": ", and the division operator " + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "inline_equation", + "content": "\\frac{W_t}{1 - W_t}" + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "content": " is also performed element-wise. This update rule guarantees that the elements of " + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "inline_equation", + "content": "W_t" + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "content": " remain within the interval " + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "inline_equation", + "content": "(0, 1)" + }, + { + "bbox": [ + 67, + 285, + 563, + 310 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 324, + 354, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 324, + 354, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 324, + 354, + 338 + ], + "type": "text", + "content": "5.3 MIRAs's Variants: MONETA, YAAD, and MEMORA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 344, + 563, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 344, + 563, + 381 + ], + "spans": [ + { + "bbox": [ + 67, + 344, + 563, + 381 + ], + "type": "text", + "content": "In the previous section we discussed different potential choices for attentional bias and retention gate to show the generality and the potential of MIRAs. In this section, building upon our framework, we present three novel sequence models, each of which designed based on a different motivation, and discuss how they can leverage fast parallel training." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": "MOnETA. 
Given " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "p,q\\in \\mathbb{R}^{\\geq 1}" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": ", we design " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "(p,q)" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": "-MONETA as the variant of MIRAs as follows: (1) For the choice of memory architecture, we use an MLP with 2 layers with expansion factor of 4 and GELU activation function (Hendrycks et al. 2016). We also use residual connections and layer norm, resulting in " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\mathcal{M}(x) = x + \\mathsf{LN}(W_1\\sigma (W_2x))" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": ". (2) We choose " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": "-attentional bias (introduced in Equation 11) for MONETA. 
(3) For the choice of retention gate, we use the hybrid of " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\ell_q" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": " retention gate " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\frac{1}{2(q - 1)}\\| W\\| _q^2" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": " (see Section 5.2 for details) and the standard " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": " regularization " + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "inline_equation", + "content": "\\frac{1}{\\beta}\\| W\\| _2^2" + }, + { + "bbox": [ + 67, + 390, + 563, + 464 + ], + "type": "text", + "content": ". (4) Finally, we use gradient descent as the memory learning algorithm. The above choices, result in the following recurrent formula for the memory module:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 195, + 470, + 563, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 470, + 563, + 499 + ], + "spans": [ + { + "bbox": [ + 195, + 470, + 563, + 499 + ], + "type": "interline_equation", + "content": "A _ {t} = \\alpha_ {t} A _ {t - 1} - \\eta_ {t} \\nabla \\ell_ {p} \\left(W _ {i - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right), \\quad \\text {a n d} \\quad W _ {t} = \\frac {A _ {t}}{\\| A _ {t} \\| _ {q} ^ {q - 2}}. 
\\tag {24}", + "image_path": "2a736aa1cc3e1f2073676420b2b757c56323282bcfd7e821464558383a1be3a3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 506, + 257, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 257, + 517 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 257, + 517 + ], + "type": "text", + "content": "Notably the gradient can be calculated using:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 186, + 525, + 563, + 539 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 525, + 563, + 539 + ], + "spans": [ + { + "bbox": [ + 186, + 525, + 563, + 539 + ], + "type": "interline_equation", + "content": "\\nabla \\ell \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\eta_ {t} \\left(\\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot | W \\mathbf {k} _ {t} - \\mathbf {v} _ {t} | ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top}. \\tag {25}", + "image_path": "026c36efefaa24294a6847f1b1619a63f0d80c4dc7095d54b572f5ab7467df99.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 546, + 160, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 546, + 160, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 546, + 160, + 559 + ], + "type": "text", + "content": "We use " + }, + { + "bbox": [ + 69, + 546, + 160, + 559 + ], + "type": "inline_equation", + "content": "(p,q) = (3,4)" + }, + { + "bbox": [ + 69, + 546, + 160, + 559 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 568, + 563, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 563, + 654 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 563, + 654 + ], + "type": "text", + "content": "YAAD. Building upon our discussion on the importance of robust memory that protects itself from extreme events (tokens), we design YAAD based on Huber objective. 
That is, in MirAS, for the choice of memory structure, we follow MONETA and use an MLP with the same architecture as above; for the choice of attentional bias, we use Huber loss (defined in Equation 16); for the choice retention gate, for the sake of simplicity, we use a combination of local and global retention as " + }, + { + "bbox": [ + 67, + 568, + 563, + 654 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(W,W_{t - 1}) = \\frac{1}{2\\theta_t}\\| W - W_{t - 1}\\| _F^2 +\\frac{1}{\\beta_t}\\| W\\| _2^2" + }, + { + "bbox": [ + 67, + 568, + 563, + 654 + ], + "type": "text", + "content": " , which is equivalent to the \"forget gate\" mechanism introduced by Behrouz et al. (2024c); and finally, we simply use gradient descent as the memory learning algorithm. Given the above choices, we can write the resulted memory learning process as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 182, + 661, + 563, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 661, + 563, + 693 + ], + "spans": [ + { + "bbox": [ + 182, + 661, + 563, + 693 + ], + "type": "interline_equation", + "content": "W _ {t} = \\alpha_ {t} W _ {t - 1} - \\left\\{ \\begin{array}{l l} \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {i f} \\| \\mathcal {M} (\\mathbf {k} _ {t}) - \\mathbf {v} _ {t} \\| \\leq \\delta_ {t}, \\\\ \\eta_ {t} \\delta_ {t} \\nabla \\ell_ {1} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) & \\text {O t h e r w i s e .} \\end{array} \\right. 
\\tag {26}", + "image_path": "9ff03c9a38b84e17a1435eb7100dbd7d1ccc7926dca27c68ad8371b6f988ca36.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "text", + "content": "Note that for improving the expressive power, in all architectures, we decouple the learning rate " + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "text", + "content": " and the retention gate rate " + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "text", + "content": ", resulting in an independent parameter " + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "inline_equation", + "content": "\\beta_{t} \\in [0,1]^{d}" + }, + { + "bbox": [ + 67, + 699, + 563, + 724 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 71, + 211, + 277 + ], + "blocks": [ + { + "bbox": [ + 111, + 71, + 211, + 277 + ], + "lines": [ + { + "bbox": [ + 111, + 71, + 211, + 277 + ], + "spans": [ + { + "bbox": [ + 111, + 71, + 211, + 277 + ], + "type": "image", + "image_path": "9cb01a969297d8878bd8358e093a6abd23c24cfb85585b92f9cd441c4a9e7943.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 289, + 563, + 315 + ], + "lines": [ + { + "bbox": [ + 67, + 289, + 563, + 315 + ], + "spans": [ + { + "bbox": [ + 67, + 289, + 563, + 315 + ], + "type": "text", + "content": "Figure 2: Visualization of the MirAs's variant architecture, their hybrid counterpart with SWA, and block design of MirAs layer." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 224, + 70, + 325, + 277 + ], + "blocks": [ + { + "bbox": [ + 224, + 70, + 325, + 277 + ], + "lines": [ + { + "bbox": [ + 224, + 70, + 325, + 277 + ], + "spans": [ + { + "bbox": [ + 224, + 70, + 325, + 277 + ], + "type": "image", + "image_path": "e133558c8c3bee1beea63c659fa263e8cff03265eb3eaa12333ad63238b2f34c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 342, + 71, + 521, + 277 + ], + "blocks": [ + { + "bbox": [ + 342, + 71, + 521, + 277 + ], + "lines": [ + { + "bbox": [ + 342, + 71, + 521, + 277 + ], + "spans": [ + { + "bbox": [ + 342, + 71, + 521, + 277 + ], + "type": "image", + "image_path": "736c0aee42d8244d6bf36c4864778f73d5974662cdb103a9c534a218935a9611.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 335, + 563, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 335, + 563, + 396 + ], + "spans": [ + { + "bbox": [ + 67, + 335, + 563, + 396 + ], + "type": "text", + "content": "MEMORA. Finally, in MEMORA, we use the idea of elastic net regularization (i.e., hard and soft retention). To this end, in Miras: (1) For the choice of memory architecture, similar to above variants, we use an MLP (the same architecture as the previous variants). (2) For the choice of attentional bias, we use simple " + }, + { + "bbox": [ + 67, + 335, + 563, + 396 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 335, + 563, + 396 + ], + "type": "text", + "content": " regression loss. (3) For the choice of retention gate we use KL divergence as in Equation 21. 
(4) Finally, we optimize the memory using gradient descent, resulting in the following update rule:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 212, + 422, + 563, + 436 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 422, + 563, + 436 + ], + "spans": [ + { + "bbox": [ + 212, + 422, + 563, + 436 + ], + "type": "interline_equation", + "content": "W _ {t} = \\operatorname {S o f t m a x} \\left(\\alpha_ {t} \\log \\left(W _ {t - 1}\\right) - \\eta_ {t} \\nabla \\ell_ {2} \\left(W _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right)\\right) \\tag {27}", + "image_path": "2c094c79d0c5c5d07d2462c9a20b77422b3b010ddf2082cd793ae09d0bc64a5d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 460, + 323, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 323, + 474 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 323, + 474 + ], + "type": "text", + "content": "5.4 Architecture Backbone and Fast Training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "text", + "content": "Architectural Backbone. For the architectural backbone, we fully follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a): We replace attention modules with our variants of MIRAs in Llama's macro architecture with MLPs with SwiGLU(. ) activation, rotary positional encodings (RoPE) (Su et al. 2024), and RMSNorm (Zhang et al. 2019). For MIRAs layer block, we follow the recent modern linear recurrent models (Behrouz et al. 2024c; Yang et al. 2024a), and incorporate a 1D depthwise-separable convolution layer (with kernel size of 4) after each of the query, key, and value projections. 
For the sake of training stability, we also use " + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "text", + "content": " normalization to " + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 67, + 483, + 563, + 568 + ], + "type": "text", + "content": ". The output of MIRAs layer block is normalized and gated with a linear layer (Mehta et al. 2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": "Channel-wise Parameters. For learnable parameters of " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "\\eta_t, \\delta_t" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": " and the retention gate of " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "\\alpha_t" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": " we use channel-wise parametrization, i.e., " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "\\eta_t, \\delta_t, \\alpha_t \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": ". While gaining more expressive power, this parametrization results in significant parameter increase. To mitigate this issue, following Peng et al. 
(2025b), we use low-rank projections to project the input into " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^k" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": " and then to " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 578, + 563, + 627 + ], + "type": "text", + "content": " is a hyperparameter (usually 32 or 64). The backbone architecture is illustrated in Figure 2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 637, + 562, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 637, + 562, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 637, + 562, + 673 + ], + "type": "text", + "content": "Hybrid Models. We also evaluate the hybrid version of Miras's variants. For hybrid models, we follow the Samba (Ren et al. 2024) architecture, in which we sequentially combine our Miras layer with Sliding Window Attention (SWA). The illustration of hybrid model Figure 2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 683, + 563, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 683, + 563, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 683, + 563, + 720 + ], + "type": "text", + "content": "Parallelizable Training. While the design of Miras's variant are theoretically well-motivated, their recurrence is non-linear, potentially making their straightforward training slow for large scales. In this section, we build upon the work of Behrouz et al. (2024c) and Sun et al. (2024) to make the training parallelizable. 
The main idea is to divide the sequence into" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "content": "chunks with size " + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "content": " (usually is 16 or 64) and calculate the gradient for all tokens in the current chunk with respect to the last state of the memory in the previous chunk. That is, we use " + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "inline_equation", + "content": "\\nabla \\ell(\\mathcal{M}_{t'}; \\mathbf{k}_t, \\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "inline_equation", + "content": "\\nabla \\ell(\\mathcal{M}_{t-1}; \\mathbf{k}_t, \\mathbf{v}_t)" + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 67, + 73, + 563, + 109 + ], + "type": "text", + "content": " is the last state in the previous chunk." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 114, + 563, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 114, + 563, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 114, + 563, + 163 + ], + "type": "text", + "content": "Given the above trick, we can calculate all gradients at once and make the recurrence inside each chunk linear. However, to fully take advantage of accelerators, we need to reformulate the process as matrix multiplication. For MONETA, for the sake of clarity, assume " + }, + { + "bbox": [ + 67, + 114, + 563, + 163 + ], + "type": "inline_equation", + "content": "q = 2" + }, + { + "bbox": [ + 67, + 114, + 563, + 163 + ], + "type": "text", + "content": ". We follow the same algorithm as Behrouz et al. (2024c) and expand the recurrence as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 238, + 173, + 563, + 218 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 173, + 563, + 218 + ], + "spans": [ + { + "bbox": [ + 238, + 173, + 563, + 218 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {M} _ {t} = \\alpha_ {t} \\mathcal {M} _ {t - 1} - \\eta_ {t} \\nabla \\ell (\\mathcal {M} _ {t - 1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) \\\\ = \\beta_ {t} \\mathcal {M} _ {0} - \\sum_ {i = 1} ^ {t} \\eta_ {i} \\frac {\\beta_ {t}}{\\beta_ {i}} \\nabla \\ell \\left(\\mathcal {M} _ {t ^ {\\prime}}; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right), \\tag {28} \\\\ \\end{array}", + "image_path": "ae2ecbc4216e63040b842529ceb40ac1992903cfd2b1e8eaf5ee7be91e76f1e8.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "t' = t - 
\\mathrm{mod}(t, b)" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "\\beta_{i} = \\prod_{j=1}^{i} \\alpha_{j}" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": ". For the sake of clarity, we focus on the first chunk, i.e., " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "t = b" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": " and so " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "t' = 0" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": ", and explain the process for the case that " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t = W_t" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": " is linear. The process for 2-layer MLPs and other chunks is similar. 
Using " + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 67, + 228, + 563, + 264 + ], + "type": "text", + "content": " loss function, we have:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 156, + 273, + 563, + 320 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 273, + 563, + 320 + ], + "spans": [ + { + "bbox": [ + 156, + 273, + 563, + 320 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla \\ell \\left(W _ {0}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = p \\left(\\operatorname {S i g n} \\left(W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot \\left| W _ {0} \\mathbf {k} _ {t} - \\mathbf {v} _ {t} \\right| ^ {p - 1}\\right) \\mathbf {k} _ {t} ^ {\\top} \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {0};; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = p \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\operatorname {S i g n} \\left(W \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\odot \\left(\\left| W _ {0} \\mathbf {K} - \\mathbf {V} \\right| ^ {p - 1}\\right) \\mathbf {K} ^ {\\top}, \\tag {29} \\\\ \\end{array}", + "image_path": "2e046a24e952b90612b884a9e695d2aca8c1cf15a82464130d6131f20bfbc4bc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_b = \\left[\\eta_1\\quad \\eta_2\\quad \\dots \\quad \\eta_b\\right]" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": 
"\\mathbf{B}_b" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": " is defined analogously on " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "\\frac{\\beta_b}{\\beta_i}\\mathrm{s}" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": ". For the sake of stability in training, we use " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "\\operatorname{Sign}(x)\\approx \\tanh (\\alpha x)" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "|x| = \\sqrt{x^2 + \\epsilon}" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "\\epsilon >0" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": " is a small number (i.e., " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "\\epsilon = 1e - 6" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": "). As discussed in Equation 24, the case that " + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "inline_equation", + "content": "q\\neq 2" + }, + { + "bbox": [ + 67, + 331, + 563, + 384 + ], + "type": "text", + "content": " appears as a normalization term on the memory. Similar to Titans (Behrouz et al. 2024c) and TTT (Sun et al. 2024), we do not apply this non-linearity inside each chunk and instead use it at the end of each chunk." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "spans": [ + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "text", + "content": "For YAAD, the process is very similar to the above. We calculate the gradient of both " + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "text", + "content": " loss and use a masking based on " + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{M}(\\mathbf{k}_t) - \\mathbf{v}_t\\| \\leq \\delta_t" + }, + { + "bbox": [ + 67, + 389, + 562, + 415 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "spans": [ + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "content": "For MEMORA, the update rule has two non-linear parts, i.e., softmax and log, making the model hardly parallelizable. To this end, as discussed above, we use its linear version inside each chunk and its non-linear version across chunks. However, using both log and softmax at the end of each chunk removes the effect of log. 
To this end, we consider a lag tokens after each chunk (i.e., tokens with index " + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "inline_equation", + "content": "i = kb + 1" + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "content": " is the chunk size and " + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "inline_equation", + "content": "k \\in \\mathbb{Z}^+" + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "content": "). That is, let " + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_0" + }, + { + "bbox": [ + 67, + 419, + 563, + 479 + ], + "type": "text", + "content": " be the last state of the memory in previous chunk, we have:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 215, + 489, + 563, + 501 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 489, + 563, + 501 + ], + "spans": [ + { + "bbox": [ + 215, + 489, + 563, + 501 + ], + "type": "interline_equation", + "content": "\\mathcal {M} _ {1} = \\operatorname {S o f t m a x} \\left(\\alpha_ {1} \\log \\left(\\mathcal {M} _ {0}\\right) - \\eta_ {1} \\nabla \\ell_ {2} \\left(\\mathcal {M} _ {0}; \\mathbf {k} _ {1}, \\mathbf {v} _ {1}\\right)\\right), \\tag {30}", + "image_path": "74a4facb0826e1f05b0cb7f320e37e06f8cea5e9aedfe84d38e0340bed346b7f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "spans": [ + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "text", + "content": "and then we use " + }, + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_1" + }, + { + 
"bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "text", + "content": " for the next chunk. Again, for the sake of clarity, assume that memory is linear, i.e., " + }, + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_1 = W_1" + }, + { + "bbox": [ + 67, + 510, + 533, + 522 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 205, + 531, + 563, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 531, + 563, + 578 + ], + "spans": [ + { + "bbox": [ + 205, + 531, + 563, + 578 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla \\ell \\left(W _ {1}; \\mathbf {k} _ {t}, \\mathbf {v} _ {t}\\right) = \\left(W _ {1} \\mathbf {k} _ {t} - \\mathbf {v} _ {t}\\right) \\mathbf {k} _ {t} ^ {\\top} (31) \\\\ \\Rightarrow \\sum_ {i = 1} ^ {b} \\eta_ {i} \\frac {\\beta_ {b}}{\\beta_ {i}} \\nabla \\ell \\left(W _ {1};; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) = \\mathbf {E} _ {b} \\odot \\mathbf {B} _ {b} \\odot \\left(W _ {1} \\mathbf {K} - \\mathbf {V}\\right) \\mathbf {K} ^ {\\top}, (32) \\\\ \\end{array}", + "image_path": "d848497141f65c220dabb2ecd3e4dc8baf38a442a81f8389e5e292b1fbee3e99.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 586, + 298, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 298, + 598 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 298, + 598 + ], + "type": "text", + "content": "where matrices are defined the same as for Equation 29." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 616, + 178, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 178, + 632 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 178, + 632 + ], + "type": "text", + "content": "6 Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 640, + 562, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 562, + 689 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 562, + 689 + ], + "type": "text", + "content": "In our experimental evaluations, we aim to answer three main questions: (1) Does different attentional biases results in different architectures in practice? (2) How does different types of retention gates (i.e., retention gate) affect the performance of the model in long context? (3) How do MEMORA, MONETA, and YAAD perform in downstream tasks compare to baselines?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 699, + 563, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 699, + 563, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 563, + 723 + ], + "type": "text", + "content": "Setup. We train our models with training context window of size 4096 using either FineWeb-Edu dataset (Penedo et al. 2024) (for LM and common-sense reasoning tasks) or C4 dataset (Raffel et al. 2020) (for scaling patterns). 
We use model" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 72, + 231, + 171 + ], + "blocks": [ + { + "bbox": [ + 72, + 72, + 231, + 171 + ], + "lines": [ + { + "bbox": [ + 72, + 72, + 231, + 171 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 231, + 171 + ], + "type": "image", + "image_path": "23e8a3c068128a8a12d8568ecdde19b54a9ecb901162d0c610604a0abdd79fbe.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 180, + 563, + 205 + ], + "lines": [ + { + "bbox": [ + 67, + 180, + 563, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 180, + 563, + 205 + ], + "type": "text", + "content": "Figure 3: Scaling patterns when increasing (Left) model size, (Middle) sequence length (model size = 340M) (3) (Right) sequence length (model size = 760M) on C4 dataset." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 238, + 72, + 395, + 171 + ], + "blocks": [ + { + "bbox": [ + 238, + 72, + 395, + 171 + ], + "lines": [ + { + "bbox": [ + 238, + 72, + 395, + 171 + ], + "spans": [ + { + "bbox": [ + 238, + 72, + 395, + 171 + ], + "type": "image", + "image_path": "bb38c6f7c2bb840b3f533a645aa391e2005cae92f583aa262a8d6028d4e35f08.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 403, + 72, + 561, + 171 + ], + "blocks": [ + { + "bbox": [ + 403, + 72, + 561, + 171 + ], + "lines": [ + { + "bbox": [ + 403, + 72, + 561, + 171 + ], + "spans": [ + { + "bbox": [ + 403, + 72, + 561, + 171 + ], + "type": "image", + "image_path": "1e71d292b87b5b94660e97445a73eff75c18a71916cb028bc365fee679360b73.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 226, + 563, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 563, + 262 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 563, + 262 + ], + "type": "text", + "content": "sizes of 120M, 340M, 760M, and 1.3B parameters. We train small models (120M and 340M) on 15B tokens sampled from the dataset, the medium size model (760M) on 30B tokens, and the large model on 100B tokens. Baseline results are reported by Behrouz et al. (2024c)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 277, + 377, + 291 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 277, + 377, + 291 + ], + "spans": [ + { + "bbox": [ + 67, + 277, + 377, + 291 + ], + "type": "text", + "content": "6.1 Language Modeling and Common-sense Reasoning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 296, + 564, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 296, + 564, + 380 + ], + "spans": [ + { + "bbox": [ + 67, + 296, + 564, + 380 + ], + "type": "text", + "content": "We follow recent studies (Behrouz et al. 2024c; Yang et al. 2024a,c) and first focus on the perplexity in language modeling and also commonsense reasoning tasks. The results for MEMORA, YAAD, MONETA and also baselines with size of 340M, 760, and 1.3B are reported in Table 2. All of our variants outperforms all the baselines including Transformer++, modern linear recurrent models and hybrid methods. The superior performance compared to hybrid models is particularly important as all of our variants are pure recurrent (attention-free). Among the three variants of MirAS, while MONETA achieves slightly weaker performance than MEMORA, and YAAD, the other two variants are close and depending on the task and model size, the best model can vary." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 394, + 181, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 394, + 181, + 407 + ], + "spans": [ + { + "bbox": [ + 69, + 394, + 181, + 407 + ], + "type": "text", + "content": "6.2 Scaling Pattern" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 414, + 562, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 414, + 562, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 414, + 562, + 439 + ], + "type": "text", + "content": "To evaluate the scaling pattern of models and for comparing them with baseline, in this section, we plot their performance with varying the model size and the context window." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 449, + 563, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 563, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 563, + 534 + ], + "type": "text", + "content": "Context Length. We first vary the training context length from 2K to 32K for two versions of our model with size 340M and 760M. The results are reported in Figure 3 (Middle and Right). All three variants of Miras scales better than state-of-the-art baselines when increasing the context length. We attribute this superior performance to: (1) expressive memory architecture. Contrary to baselines like Mamba2 and GSA that uses vector- and matrix-valued memory, our variants are using 2-layer MLPs with more expressive power to learn from longer sequences. (2) The choice of retention gate and attentional bias: All of our three variants go beyond the standard attentional biases and retention gates. These choices can help the memory to better manage its fixed-size capacity." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 543, + 563, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 543, + 563, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 543, + 563, + 582 + ], + "type": "text", + "content": "Model Size. We also report the #FLOPs vs. perplexity of our models and baselines in Figure 3 (Left). All three variants outperforms all baselines given almost the same budget of FLOPs. These results, once again support the importance of powerful memory design." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 594, + 205, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 594, + 205, + 608 + ], + "spans": [ + { + "bbox": [ + 69, + 594, + 205, + 608 + ], + "type": "text", + "content": "6.3 Needle In Haystack" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 613, + 564, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 564, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 564, + 696 + ], + "type": "text", + "content": "To evaluate the effective context window of our models and baselines, we use needle-in-haystack task. In this task, we evaluate the model on retrieving a piece of information (i.e., the \"needle\") from long distractor texts (i.e., the \"haystack\"). We focus on the Single NIAH (S-NIAH) task from RULER benchmark (Hsieh et al. 2024) and evaluate our models and baselines on sequences with length 1K, 2K, 4K, and 8K. The results are reported in Table 3. All our variants outperforms all the baselines with a considerable margin. Interestingly, MONETA shows better performance than others when the data is synthetic noise (S-NIAH-PK). 
This observation validates the effectiveness of " + }, + { + "bbox": [ + 67, + 613, + 564, + 696 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 613, + 564, + 696 + ], + "type": "text", + "content": "-norm objective and retention gates as they are more robust to noise." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 96, + 123, + 536, + 602 + ], + "blocks": [ + { + "bbox": [ + 67, + 72, + 564, + 114 + ], + "lines": [ + { + "bbox": [ + 67, + 72, + 564, + 114 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 564, + 114 + ], + "type": "text", + "content": "Table 2: Performance of MIRAS's variants and baselines on language modeling and common-sense reasoning tasks. Hybrid models are marked with *. The best results of simple and hybrid models are highlighted. In largest scale, we compare our simple models with even hybrid models and highlight the best results." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 96, + 123, + 536, + 602 + ], + "lines": [ + { + "bbox": [ + 96, + 123, + 536, + 602 + ], + "spans": [ + { + "bbox": [ + 96, + 123, + 536, + 602 + ], + "type": "table", + "html": "
ModelWiki. ppl ↓LMB. ppl ↓LMB. acc ↑PIQA acc ↑Hella. acc_n ↑Wino. acc ↑ARC-e acc ↑ARC-c acc_n ↑SIQA acc ↑BoolQ acc ↑
340M params / 15B tokens
Transformer++31.5241.0830.7662.9834.7650.5345.2124.0536.8158.24
RetNet32.5049.7328.2462.6134.1550.9144.2723.6236.7959.72
GLA28.5143.0228.7364.0535.9650.0054.1924.2937.1358.39
Mamba30.8340.2129.9463.7935.8849.8249.2424.5635.4160.07
DeltaNet28.6547.3028.4363.5235.9549.6352.6825.3737.9658.79
TTT27.4434.1930.0663.9735.7150.0853.0126.1137.3259.83
Gated DeltaNet27.0130.9434.1163.0838.1251.6055.2826.7734.8959.54
MONETA (ours)26.1929.3135.7063.9939.2352.0455.9627.1537.2960.22
YAAD (ours)26.6129.1134.0964.9339.8651.1254.7528.6433.8260.29
MEMORA (ours)27.1630.4433.6865.2139.1751.2353.4027.9934.159.29
760M params / 30B tokens
Transformer++25.2127.6435.7866.9242.1951.9560.3832.4639.5160.37
RetNet26.0824.4534.5167.1941.6352.0963.1732.7838.3657.92
Mamba222.9428.3733.5467.9042.7149.7763.4831.0940.0658.15
DeltaNet24.3724.6037.0666.9341.9850.6564.8731.3939.8859.02
TTT24.1723.5134.7467.2543.9250.9964.5333.8140.1659.58
Gated DeltaNet21.1822.0935.5468.0144.9550.7366.8733.0939.2159.14
Samba*20.6322.7139.7269.1947.3552.0166.9233.2038.9861.24
Gated DeltaNet-H2*19.8820.8339.1868.9548.2252.5767.0135.4939.3961.11
MONETA (ours)21.1821.9438.0269.5549.1653.0167.4736.0940.5363.18
YAAD (ours)20.9921.5737.8569.1450.0253.9367.7836.2741.0163.34
MEMORA (ours)22.2822.3138.1967.8249.3053.2863.5736.1540.9462.96
MONETA-H (ours)18.7220.1340.5970.8450.1354.1767.6436.7940.8762.43
YAAD-H (ours)18.5919.8040.2269.5150.4853.6968.0436.5540.2861.94
MEMORA-H (ours)18.2420.5539.9169.0649.8452.8866.9036.1240.9961.75
1.3B params / 100B tokens
Transformer++18.5318.3242.6070.0250.2353.5168.8335.1040.6657.09
RetNet19.0817.2740.5270.0749.1654.1467.3433.7840.7860.39
Mamba216.5612.5645.6671.8755.6755.2472.4737.8840.2060.13
DeltaNet17.7116.8842.4670.7250.9353.3568.4735.6640.2255.29
Gated DeltaNet16.4212.1746.6572.2555.7657.4571.2138.3940.6360.24
Samba*16.1313.2944.9470.9453.4255.5668.8136.1739.9662.11
Gated DeltaNet-H2*15.9112.5548.7672.1956.8857.7771.3339.0741.9161.55
MONETA (ours)15.5211.4747.8873.1656.1459.0972.5340.3241.9161.18
YAAD (ours)15.1811.8947.2372.8156.4659.0272.1440.0540.7361.86
MEMORA (ours)15.9012.0448.6773.1055.9957.3671.5537.9240.1961.34
", + "image_path": "7f3725d584d1950412336fd521c5fe3fd51caa25f4aa98854ba39b2420f74a32.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 621, + 180, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 621, + 180, + 635 + ], + "spans": [ + { + "bbox": [ + 69, + 621, + 180, + 635 + ], + "type": "text", + "content": "6.4 Ablation Study" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 641, + 563, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 641, + 563, + 665 + ], + "spans": [ + { + "bbox": [ + 67, + 641, + 563, + 665 + ], + "type": "text", + "content": "In this section we perform ablation studies to validate if different design choices that we discussed through the paper are positively contributing for achieving better results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": "The Effect of " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": " on Performance. We first evaluate the effect of " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": " on the performance of MONETA. We vary the value of " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p \\in \\{1, 1.5, 2, 2.8, 3, 3.2, 4\\}" + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": " and context window from 2K to 16K. The results are reported in Figure 4. 
Interestingly, there is no monotone pattern when increasing the value of " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": " and the best performance is achieved when " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p = 3" + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "text", + "content": ", while " + }, + { + "bbox": [ + 67, + 675, + 564, + 712 + ], + "type": "inline_equation", + "content": "p = 4" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 157, + 104, + 476, + 213 + ], + "blocks": [ + { + "bbox": [ + 67, + 72, + 563, + 96 + ], + "lines": [ + { + "bbox": [ + 67, + 72, + 563, + 96 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 563, + 96 + ], + "type": "text", + "content": "Table 3: Performance of MONETA, YAAD, MEMORA, and baselines on NIAH task from RULER benchmark. The best results with highest accuracy are highlighted." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 157, + 104, + 476, + 213 + ], + "lines": [ + { + "bbox": [ + 157, + 104, + 476, + 213 + ], + "spans": [ + { + "bbox": [ + 157, + 104, + 476, + 213 + ], + "type": "table", + "html": "
ModelS-NIAH-PKS-NIAH-NS-NIAH-WAverage
2K4K8K2K4K8K1K2K4K
Mamba298.661.431.098.455.814.262.242.24.252.0
DeltaNet96.898.898.647.215.412.885.246.220.057.9
Gated DeltaNet89.891.490.099.291.826.486.482.624.475.8
TTT98.498.898.060.236.610.285.878.828.066.1
MONETA99.498.898.899.499.492.892.288.270.893.5
YaAD99.298.694.499.898.693.291.889.667.492.9
MEMORA99.298.892.698.499.293.292.488.270.492.1
", + "image_path": "9d623c722a5ab850992a05508faa29932404f7de735834c9391b80064d8ea420.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 233, + 563, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 233, + 563, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 233, + 563, + 258 + ], + "type": "text", + "content": "achieves the worst performance. Also, although different values of " + }, + { + "bbox": [ + 67, + 233, + 563, + 258 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 233, + 563, + 258 + ], + "type": "text", + "content": " results in different memory modules with varied performance, the scaling pattern when increasing the context length is almost the same." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": "The Effect of " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": " on Performance. Similarly, we evaluate the effect of " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": " by varying it in " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "\\{2, 3, 4, 5\\}" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": ". 
Interestingly, contrary to " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": ", the value of " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": " can change the scaling pattern when increasing the context length. The main reason for this observation is that the value of " + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 268, + 563, + 316 + ], + "type": "text", + "content": " determines the retention gate and a powerful retention gate can improve the memory management, resulting in better performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "content": "The Effect of Design. To evaluate the architectural design choices, we perform an ablation study on YAAD. The results are in Table 4. 
The first row, reports the performance of YAAD, while (1) the second row removes the retention (i.e., " + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "inline_equation", + "content": "\\beta = 1" + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "content": "), (2) third row makes " + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "content": " input independent, (3) the third row removes " + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "content": "-loss from the Huber loss, (4) the forth row removes the " + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 67, + 327, + 563, + 388 + ], + "type": "text", + "content": " condition, and (5) the last row replaces the MLP with a linear layer. These results indicate that all design choices are contributing to the performance of the model." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 72, + 411, + 214, + 499 + ], + "blocks": [ + { + "bbox": [ + 72, + 411, + 214, + 499 + ], + "lines": [ + { + "bbox": [ + 72, + 411, + 214, + 499 + ], + "spans": [ + { + "bbox": [ + 72, + 411, + 214, + 499 + ], + "type": "image", + "image_path": "a0ae2ecc3a1fdcbb355b80baff04bab982ec5c72538465ed489eb68b9ab78288.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "lines": [ + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "type": "text", + "content": "Figure 4: The effect of parameters " + }, + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 509, + 367, + 533 + ], + "type": "text", + "content": " on the performance with different context length." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 222, + 409, + 365, + 499 + ], + "blocks": [ + { + "bbox": [ + 222, + 409, + 365, + 499 + ], + "lines": [ + { + "bbox": [ + 222, + 409, + 365, + 499 + ], + "spans": [ + { + "bbox": [ + 222, + 409, + 365, + 499 + ], + "type": "image", + "image_path": "85a921c0e5ac0811c346988a447963c727a9058481fbea39ac62dc440e750ccb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 413, + 448, + 553, + 544 + ], + "blocks": [ + { + "bbox": [ + 402, + 415, + 563, + 439 + ], + "lines": [ + { + "bbox": [ + 402, + 415, + 563, + 439 + ], + "spans": [ + { + "bbox": [ + 402, + 415, + 563, + 439 + ], + "type": "text", + "content": "Table 4: Ablation study on the components of YAAD." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 413, + 448, + 553, + 544 + ], + "lines": [ + { + "bbox": [ + 413, + 448, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 413, + 448, + 553, + 544 + ], + "type": "table", + "html": "
ModelAvg. LM
YAAD53.98
- Retention Gate50.63
- Input-dependent δ52.19
l2-loss52.86
l1-loss53.04
linear memory51.57
", + "image_path": "f5b314e89c68e81cb2f087136de86e716ba11e0e457e1752e6ffa962a3bddd22.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 558, + 168, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 558, + 168, + 572 + ], + "spans": [ + { + "bbox": [ + 69, + 558, + 168, + 572 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 583, + 563, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 583, + 563, + 667 + ], + "spans": [ + { + "bbox": [ + 67, + 583, + 563, + 667 + ], + "type": "text", + "content": "In this paper, we present MIRAS, a general framework that explains the connection of online optimization and test time memorization. MIRAS framework can explain the role of several standard architectural choices in the literature (e.g., forget gate) and helps design next generation of architectures that are capable of managing the memory better. Building upon our framework, we present three novel sequence models, each of which with its own (dis)advantages. Our experimental evaluations show that all these variants are more powerful than Transformers and linear RNNs, in various downstream tasks. In this work, we present a diverse set of variants using MIRAS. In future, exploring these alternative architectures for different downstream tasks is an interesting future direction." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 144, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 144, + 84 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 144, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 91, + 564, + 689 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 73, + 91, + 564, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 91, + 564, + 127 + ], + "spans": [ + { + "bbox": [ + 73, + 91, + 564, + 127 + ], + "type": "text", + "content": "[1] Ali Behrouz, Parsa Delavari, and Farnoosh Hashemi. \"Unsupervised Representation Learning of Brain Activity via Bridging Voxel Activity and Functional Connectivity\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=n0jZfpLyh1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 127, + 563, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 127, + 563, + 150 + ], + "spans": [ + { + "bbox": [ + 75, + 127, + 563, + 150 + ], + "type": "text", + "content": "[2] Ali Behrouz, Michele Santacatterina, and Ramin Zabih. \"Mambamixer: Efficient selective state space models with dual token and channel selection\". In: arXiv preprint arXiv:2403.19888 (2024)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 151, + 563, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 151, + 563, + 175 + ], + "spans": [ + { + "bbox": [ + 75, + 151, + 563, + 175 + ], + "type": "text", + "content": "[3] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. \"Titans: Learning to memorize at test time\". In: arXiv preprint arXiv:2501.00663 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 175, + 562, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 175, + 562, + 199 + ], + "spans": [ + { + "bbox": [ + 75, + 175, + 562, + 199 + ], + "type": "text", + "content": "[4] Alberto Bietti, Vivien Cabannes, Diane Bouchacourt, Herve Jegou, and Leon Bottou. \"Birth of a transformer: A memory viewpoint\". In: Advances in Neural Information Processing Systems 36 (2023), pp. 1560-1588." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 199, + 561, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 199, + 561, + 223 + ], + "spans": [ + { + "bbox": [ + 75, + 199, + 561, + 223 + ], + "type": "text", + "content": "[5] Yonatan Bisk, Rowan Zellers, Jianfeng Gao, Yejin Choi, et al. \"Piqa: Reasoning about physical commonsense in natural language\". In: Proceedings of the AAAI conference on artificial intelligence. Vol. 34. 2020, pp. 7432-7439." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 223, + 545, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 223, + 545, + 236 + ], + "spans": [ + { + "bbox": [ + 75, + 223, + 545, + 236 + ], + "type": "text", + "content": "[6] Leon Bottou and Vladimir Vapnik. \"Local learning algorithms\". In: Neural computation 4.6 (1992), pp. 888-900." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 236, + 563, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 236, + 563, + 306 + ], + "spans": [ + { + "bbox": [ + 75, + 236, + 563, + 306 + ], + "type": "text", + "content": "[7] Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. \"BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions\". In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Ed. by Jill Burstein, Christy Doran, and Thamar Solorio. Minneapolis, Minnesota: Association for Computational Linguistics, June 2019, pp. 2924-2936. DOI: 10.18653/v1/N19-1300. URL: https://aclanthology.org/N19-1300/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 306, + 563, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 306, + 563, + 342 + ], + "spans": [ + { + "bbox": [ + 75, + 306, + 563, + 342 + ], + "type": "text", + "content": "[8] Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. \"Think you have solved question answering? try arc, the ai2 reasoning challenge\". In: arXiv preprint arXiv:1803.05457 (2018)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 342, + 563, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 342, + 563, + 366 + ], + "spans": [ + { + "bbox": [ + 75, + 342, + 563, + 366 + ], + "type": "text", + "content": "[9] Imre Csiszar. \"On information-type measure of difference of probability distributions and indirect observations\". In: Studia Sci. Math. Hungar. 2 (1967), pp. 299-318." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 366, + 563, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 366, + 563, + 403 + ], + "spans": [ + { + "bbox": [ + 70, + 366, + 563, + 403 + ], + "type": "text", + "content": "[10] Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. \"Recurrent Neural Networks Learn to Store and Generate Sequences using Non-Linear Representations\". In: Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP. 2024, pp. 248-262." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 403, + 563, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 403, + 563, + 437 + ], + "spans": [ + { + "bbox": [ + 70, + 403, + 563, + 437 + ], + "type": "text", + "content": "[11] Karan Dalal, Daniel Koceja, Gashon Hussein, Jiarui Xu, Yue Zhao, Youjin Song, Shihao Han, Ka Chun Cheung, Jan Kautz, Carlos Guestrin, et al. \"One-Minute Video Generation with Test-Time Training\". In: arXiv preprint arXiv:2504.05298 (2025)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 438, + 563, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 438, + 563, + 462 + ], + "spans": [ + { + "bbox": [ + 70, + 438, + 563, + 462 + ], + "type": "text", + "content": "[12] Tri Dao and Albert Gu. \"Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality\". In: arXiv preprint arXiv:2405.21060 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 462, + 563, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 462, + 563, + 498 + ], + "spans": [ + { + "bbox": [ + 70, + 462, + 563, + 498 + ], + "type": "text", + "content": "[13] Soham De, Samuel L Smith, Anushan Fernando, Aleksandar Botev, George Cristian-Muraru, Albert Gu, Ruba Haroun, Leonard Berrada, Yutian Chen, Srivatsan Srinivasan, et al. 
\"Griffin: Mixing gated linear recurrences with local attention for efficient language models\". In: arXiv preprint arXiv:2402.19427 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 498, + 563, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 498, + 563, + 534 + ], + "spans": [ + { + "bbox": [ + 70, + 498, + 563, + 534 + ], + "type": "text", + "content": "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. \"An image is worth 16x16 words: Transformers for image recognition at scale\". In: arXiv preprint arXiv:2010.11929 (2020)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 534, + 563, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 534, + 563, + 557 + ], + "spans": [ + { + "bbox": [ + 70, + 534, + 563, + 557 + ], + "type": "text", + "content": "[15] Yossi Gandelsman, Yu Sun, Xinlei Chen, and Alexei Efros. \"Test-time training with masked autoencoders\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 29374-29385." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 558, + 563, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 558, + 563, + 582 + ], + "spans": [ + { + "bbox": [ + 70, + 558, + 563, + 582 + ], + "type": "text", + "content": "[16] Xavier Gonzalez, Andrew Warrington, Jimmy Smith, and Scott Linderman. \"Towards scalable and stable parallelization of nonlinear rnns\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 5817-5849." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 582, + 563, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 582, + 563, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 582, + 563, + 605 + ], + "type": "text", + "content": "[17] Riccardo Grazzi, Julien Siems, Jörg KH Franke, Arber Zela, Frank Hutter, and Massimiliano Pontil. \"Unlocking state-tracking in linear rnns through negative eigenvalues\". In: arXiv preprint arXiv:2411.12537 (2024)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 605, + 563, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 605, + 563, + 629 + ], + "spans": [ + { + "bbox": [ + 70, + 605, + 563, + 629 + ], + "type": "text", + "content": "[18] Klaus Greff, Rupesh K Srivastava, Jan Koutnk, Bas R Steunebrink, and Jürgen Schmidhuber. \"LSTM: A search space odyssey\". In: IEEE transactions on neural networks and learning systems 28.10 (2016), pp. 2222-2232." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 629, + 563, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 629, + 563, + 653 + ], + "spans": [ + { + "bbox": [ + 70, + 629, + 563, + 653 + ], + "type": "text", + "content": "[19] Albert Gu and Tri Dao. \"Mamba: Linear-Time Sequence Modeling with Selective State Spaces\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=tEYskw1VY2." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 653, + 563, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 653, + 563, + 689 + ], + "spans": [ + { + "bbox": [ + 70, + 653, + 563, + 689 + ], + "type": "text", + "content": "[20] Albert Gu, Karan Goel, and Christopher Re. \"Efficiently Modeling Long Sequences with Structured State Spaces\". In: International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=uYLFOz1v1AC." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 564, + 708 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "type": "text", + "content": "[21] Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. \"Liquid Structural State-Space Models\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=g4OTKRKfS7R." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 109, + 495, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 109, + 495, + 121 + ], + "spans": [ + { + "bbox": [ + 70, + 109, + 495, + 121 + ], + "type": "text", + "content": "[22]Trevor Hastie, Robert Tibshirani, Jerome Friedman, et al. The elements of statistical learning. 2009." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 122, + 563, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 122, + 563, + 144 + ], + "spans": [ + { + "bbox": [ + 70, + 122, + 563, + 144 + ], + "type": "text", + "content": "[23] Elad Hazan et al. \"Introduction to online convex optimization\". In: Foundations and Trends® in Optimization 2.3-4 (2016), pp. 157-325." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 145, + 518, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 145, + 518, + 157 + ], + "spans": [ + { + "bbox": [ + 70, + 145, + 518, + 157 + ], + "type": "text", + "content": "[24] Donald Olding Hebb. The organization of behavior: A neuropsychological theory. Psychology press, 2005." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 158, + 561, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 158, + 561, + 169 + ], + "spans": [ + { + "bbox": [ + 70, + 158, + 561, + 169 + ], + "type": "text", + "content": "[25] Dan Hendrycks and Kevin Gimpel. \"Gaussian error linear units (gelus)\". In: arXiv preprint arXiv:1606.08415 (2016)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 170, + 563, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 563, + 192 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 563, + 192 + ], + "type": "text", + "content": "[26] Donald E Hilt and Donald W Seegrist. Ridge, a computer program for calculating ridge regression estimates. Vol. 236. Department of Agriculture, Forest Service, Northeastern Forest Experiment ..., 1977." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 193, + 562, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 193, + 562, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 193, + 562, + 217 + ], + "type": "text", + "content": "[27] Arthur E Hoerl and Robert W Kennard. \"Ridge regression: applications to nonorthogonal problems\". In: Technometrics 12.1 (1970), pp. 69-82." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "spans": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "text", + "content": "[28] John J Hopfield. 
“Neural networks and physical systems with emergent collective computational abilities.” In: Proceedings of the national academy of sciences 79.8 (1982), pp. 2554-2558." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 241, + 563, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 241, + 563, + 276 + ], + "spans": [ + { + "bbox": [ + 70, + 241, + 563, + 276 + ], + "type": "text", + "content": "[29] Cheng-Ping Hsieh, Simeng Sun, Samuel Kriman, Shantanu Acharya, Dima Rekesh, Fei Jia, and Boris Ginsburg. \"RULER: What's the Real Context Size of Your Long-Context Language Models?\" In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=kIoBbc76Sy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "spans": [ + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "type": "text", + "content": "[30] Jerry Yao-Chieh Hu, Dennis Wu, and Han Liu. \"Provably optimal memory capacity for modern hopfield models: Transformer-compatible dense associative memories as spherical codes\". In: arXiv preprint arXiv:2410.23126 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 301, + 563, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 301, + 563, + 324 + ], + "spans": [ + { + "bbox": [ + 70, + 301, + 563, + 324 + ], + "type": "text", + "content": "[31] Peter J Huber. \"Robust estimation of a location parameter\". In: Breakthroughs in statistics: Methodology and distribution. Springer, 1992, pp. 492-518." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 325, + 562, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 325, + 562, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 325, + 562, + 348 + ], + "type": "text", + "content": "[32] Kazuki Irie, Robert Csordas, and Jürgen Schmidhuber. 
\"Practical computational power of linear transformers and their recurrent and self-referential extensions\". In: arXiv preprint arXiv:2310.16076 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 349, + 563, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 563, + 372 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 563, + 372 + ], + "type": "text", + "content": "[33] Kazuki Irie, Imanol Schlag, Robert Csordas, and Jurgen Schmidhuber. \"Going beyond linear transformers with recurrent fast weight programmers\". In: Advances in neural information processing systems 34 (2021), pp. 7703-7717." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 373, + 563, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 563, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 563, + 396 + ], + "type": "text", + "content": "[34] Vidit Jain and Erik Learned-Miller. \"Online domain adaptation of a pre-trained cascade of classifiers\". In: CVPR 2011. IEEE. 2011, pp. 577-584." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 396, + 563, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 396, + 563, + 431 + ], + "spans": [ + { + "bbox": [ + 70, + 396, + 563, + 431 + ], + "type": "text", + "content": "[35] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. \"Scaling laws for neural language models\". In: arXiv preprint arXiv:2001.08361 (2020)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 432, + 448, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 432, + 448, + 444 + ], + "spans": [ + { + "bbox": [ + 70, + 432, + 448, + 444 + ], + "type": "text", + "content": "[36] M. Karami and V. Mirrokni. Lattice: Learning to Efficiently Compress the Memory. 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 445, + 563, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 563, + 479 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 563, + 479 + ], + "type": "text", + "content": "[37] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. \"Transformers are rnns: Fast autoregressive transformers with linear attention\". In: International conference on machine learning. PMLR. 2020, pp. 5156-5165." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 480, + 477, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 480, + 477, + 491 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 477, + 491 + ], + "type": "text", + "content": "[38] Dmitry Krotov. \"Hierarchical associative memory\". In: arXiv preprint arXiv:2107.06446 (2021)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 492, + 563, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 492, + 563, + 516 + ], + "spans": [ + { + "bbox": [ + 70, + 492, + 563, + 516 + ], + "type": "text", + "content": "[39] Dmitry Krotov and John J Hopfield. “Dense associative memory for pattern recognition”. In: Advances in neural information processing systems 29 (2016)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 517, + 563, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 517, + 563, + 551 + ], + "spans": [ + { + "bbox": [ + 70, + 517, + 563, + 551 + ], + "type": "text", + "content": "[40] Aonian Li, Bangwei Gong, Bo Yang, Boji Shan, Chang Liu, Cheng Zhu, Chunhao Zhang, Congchao Guo, Da Chen, Dong Li, et al. \"Minimax-01: Scaling foundation models with lightning attention\". In: arXiv preprint arXiv:2501.08313 (2025)." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 552, + 563, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 552, + 563, + 575 + ], + "spans": [ + { + "bbox": [ + 70, + 552, + 563, + 575 + ], + "type": "text", + "content": "[41] Chengxuan Li, Di Huang, Zeyu Lu, Yang Xiao, Qingqi Pei, and Lei Bai. “A survey on long video generation: Challenges, methods, and prospects”. In: arXiv preprint arXiv:2403.16407 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 576, + 563, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 576, + 563, + 599 + ], + "spans": [ + { + "bbox": [ + 70, + 576, + 563, + 599 + ], + "type": "text", + "content": "[42] Xiaoyu Li, Yuanpeng Li, Yingyu Liang, Zhenmei Shi, and Zhao Song. \"On the expressive power of modern hopfield networks\". In: arXiv preprint arXiv:2412.05562 (2024)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 600, + 563, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 600, + 563, + 634 + ], + "spans": [ + { + "bbox": [ + 70, + 600, + 563, + 634 + ], + "type": "text", + "content": "[43] Yi Heng Lim, Qi Zhu, Joshua Selfridge, and Muhammad Firmansyah Kasim. \"Parallelizing non-linear sequential models over the sequence length\". In: The Twelfth International Conference on Learning Representations. 2024. URL: https://openreview.net/forum?id=E34A1VLN0v." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 635, + 563, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 635, + 563, + 658 + ], + "spans": [ + { + "bbox": [ + 70, + 635, + 563, + 658 + ], + "type": "text", + "content": "[44] Bo Liu, Rui Wang, Lemeng Wu, Yihao Feng, Peter Stone, and Qiang Liu. \"Longhorn: State space models are amortized online learners\". In: arXiv preprint arXiv:2407.14207 (2024)." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 659, + 563, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 659, + 563, + 695 + ], + "spans": [ + { + "bbox": [ + 70, + 659, + 563, + 695 + ], + "type": "text", + "content": "[45] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. \"Lost in the middle: How language models use long contexts\". In: Transactions of the Association for Computational Linguistics 12 (2024), pp. 157-173." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 70, + 696, + 506, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 506, + 708 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 506, + 708 + ], + "type": "text", + "content": "[46] Elizabeth F Loftus. \"The reality of repressed memories.\" In: American psychologist 48.5 (1993), p. 518." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 564, + 696 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 69, + 72, + 564, + 96 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 564, + 96 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 564, + 96 + ], + "type": "text", + "content": "[47] Carlo Lucibello and Marc Mézard. \"Exponential capacity of dense associative memories\". In: Physical Review Letters 132.7 (2024), p. 077301." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 97, + 564, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 97, + 564, + 121 + ], + "spans": [ + { + "bbox": [ + 69, + 97, + 564, + 121 + ], + "type": "text", + "content": "[48] Julien Mairal. \"Incremental majorization-minimization optimization with application to large-scale machine learning\". In: SIAM Journal on Optimization 25.2 (2015), pp. 829-855." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 121, + 564, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 121, + 564, + 156 + ], + "spans": [ + { + "bbox": [ + 69, + 121, + 564, + 156 + ], + "type": "text", + "content": "[49] Harsh Mehta, Ankit Gupta, Ashok Cutkosky, and Behnam Neyshabur. \"Long Range Language Modeling via Gated State Spaces\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=5MkYIYCbva." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 157, + 563, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 157, + 563, + 180 + ], + "spans": [ + { + "bbox": [ + 70, + 157, + 563, + 180 + ], + "type": "text", + "content": "[50] Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. “Pointer Sentinel Mixture Models”. In: International Conference on Learning Representations. 2017. URL: https://openreview.net/forum?id=Byj72udxe." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 181, + 563, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 181, + 563, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 181, + 563, + 205 + ], + "type": "text", + "content": "[51] William Merrill, Jackson Petty, and Ashish Sabharwal. \"The Illusion of State in State-Space Models\". In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=QZgo9JZpLq." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 205, + 563, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 205, + 563, + 240 + ], + "spans": [ + { + "bbox": [ + 70, + 205, + 563, + 240 + ], + "type": "text", + "content": "[52] Ravi Teja Mullapudi, Steven Chen, Keyi Zhang, Deva Ramanan, and Kayvon Fatahalian. \"Online model distillation for efficient video inference\". In: Proceedings of the IEEE/CVF International conference on computer vision. 2019, pp. 3573-3582." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "spans": [ + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "type": "text", + "content": "[53] Tsendsuren Munkhdalai, Alessandro Sordoni, Tong Wang, and Adam Trischler. “Metalearned neural memory”. In: Advances in Neural Information Processing Systems 32 (2019)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 265, + 563, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 265, + 563, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 265, + 563, + 289 + ], + "type": "text", + "content": "[54] Tsendsuren Munkhdalai and Hong Yu. \"Neural semantic encoders\". In: Proceedings of the conference. Association for Computational Linguistics. Meeting. Vol. 1. NIH Public Access. 2017, p. 397." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 289, + 563, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 289, + 563, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 289, + 563, + 312 + ], + "type": "text", + "content": "[55] Daniel Neil, Jun Haeng Lee, Tobi Delbruck, and Shih-Chii Liu. \"Delta networks for optimized recurrent network computation\". In: International conference on machine learning. PMLR. 2017, pp. 2584-2593." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 313, + 563, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 313, + 563, + 336 + ], + "spans": [ + { + "bbox": [ + 70, + 313, + 563, + 336 + ], + "type": "text", + "content": "[56] Hideyuki Okano, Tomoo Hirano, and Evan Balaban. \"Learning and memory\". In: Proceedings of the National Academy of Sciences 97.23 (2000), pp. 12403-12404." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 337, + 563, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 337, + 563, + 372 + ], + "spans": [ + { + "bbox": [ + 70, + 337, + 563, + 372 + ], + "type": "text", + "content": "[57] Antonio Orvieto, Samuel L Smith, Albert Gu, Anushan Fernando, Caglar Gulcehre, Razvan Pascanu, and Soham De. \"Resurrecting recurrent neural networks for long sequences\". In: International Conference on Machine Learning. PMLR. 2023, pp. 26670-26698." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 373, + 563, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 563, + 431 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 563, + 431 + ], + "type": "text", + "content": "[58] Denis Paperno, German Kruszewski, Angeliki Lazaridou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernandez. \"The LAMBADA dataset: Word prediction requiring a broad discourse context\". In: Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Ed. by Katrin Erk and Noah A. Smith. Berlin, Germany: Association for Computational Linguistics, Aug. 2016, pp. 1525-1534. DOI: 10.18653/v1/P16-1144. URL: https://aclanthology.org/P16-1144/." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 432, + 563, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 432, + 563, + 479 + ], + "spans": [ + { + "bbox": [ + 70, + 432, + 563, + 479 + ], + "type": "text", + "content": "[59] Guilherme Penedo, Hynek Kydlcek, Loubna Ben allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro Von Werra, and Thomas Wolf. \"The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale\". In: The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2024. URL: https://openreview.net/forum?id=n6Sckn2QaG." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 480, + 564, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 480, + 564, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 564, + 563 + ], + "type": "text", + "content": "[60] Bo Peng, Eric Alcaide, Quentin Gregory Anthony, Alon Albalak, Samuel Arcadinho, Stella Biderman, Huanqi Cao, Xin Cheng, Michael Nguyen Chung, Leon Derczynski, Xingjian Du, Matteo Grella, Kranthi Kiran GV, Xuzheng He, Haowen Hou, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, Jiaju Lin, Krishna Sri Ipsit Mantri, Ferdinand Mom, Atsushi Saito, Guangyu Song, Xiangru Tang, Johan S. Wind, Stanisław Wozniak, Zhenyuan Zhang, Qinghua Zhou, Jian Zhu, and Rui-Jie Zhu. \"RWKV: Reinventing RNNs for the Transformer Era\". In: The 2023 Conference on Empirical Methods in Natural Language Processing. 2023. URL: https://openreview.net/forum?id=7SaXczaBpG." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 563, + 564, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 564, + 599 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 564, + 599 + ], + "type": "text", + "content": "[61] Bo Peng, Daniel Goldstein, Quentin Anthony, Alon Albalak, Eric Alcaide, Stella Biderman, Eugene Cheah, Xingjian Du, Teddy Ferdinan, Haowen Hou, et al. \"Eagle and finch: Rwkv with matrix-valued states and dynamic recurrence\". In: arXiv preprint arXiv:2404.05892 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 600, + 563, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 563, + 635 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 563, + 635 + ], + "type": "text", + "content": "[62] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Saiteja Utpala, et al. \"RWKV-7\" Goose\" with Expressive Dynamic State Evolution\". In: arXiv preprint arXiv:2503.14456 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 635, + 563, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 635, + 563, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 635, + 563, + 670 + ], + "type": "text", + "content": "[63] Bo Peng, Ruichong Zhang, Daniel Goldstein, Eric Alcaide, Haowen Hou, Janna Lu, William Merrill, Guangyu Song, Kaifeng Tan, Siateja Utpala, et al. \"Rwkv-7\" goose\" with expressive dynamic state evolution\". In: arXiv preprint arXiv:2503.14456 (2025)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 671, + 553, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 671, + 553, + 683 + ], + "spans": [ + { + "bbox": [ + 69, + 671, + 553, + 683 + ], + "type": "text", + "content": "[64] Yury Polyanskiy and Yihong Wu. Information theory: From coding to learning. Cambridge university press, 2025." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 684, + 559, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 684, + 559, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 684, + 559, + 696 + ], + "type": "text", + "content": "[65] DL Prados and SC Kak. \"Neural network capacity using delta rule\". In: *Electronics Letters* 25.3 (1989), pp. 197-199." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 564, + 719 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 564, + 108 + ], + "type": "text", + "content": "[66] Zhen Qin, Songlin Yang, Weixuan Sun, Xuyang Shen, Dong Li, Weigao Sun, and Yiran Zhong. \"HGRN2: Gated Linear RNNs with State Expansion\". In: First Conference on Language Modeling. 2024. URL: https://openreview.net/forum?id=y6SqBJfCSk." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 109, + 564, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 109, + 564, + 144 + ], + "spans": [ + { + "bbox": [ + 70, + 109, + 564, + 144 + ], + "type": "text", + "content": "[67] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. \"Exploring the limits of transfer learning with a unified text-to-text transformer\". In: Journal of machine learning research 21.140 (2020), pp. 1-67." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 145, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 145, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 70, + 145, + 564, + 191 + ], + "type": "text", + "content": "[68] Hubert Ramsauer, Bernhard Schäfl, Johannes Lehner, Philipp Seidl, Michael Widrich, Lukas Gruber, Markus Holzleitner, Thomas Adler, David Kreil, Michael K Kopp, Günter Klambauer, Johannes Brandstetter, and Sepp Hochreiter. \"Hopfield Networks is All You Need\". In: International Conference on Learning Representations. 2021. URL: https://openreview.net/forum?id=tL89RnzIiCd." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 192, + 564, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 192, + 564, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 192, + 564, + 217 + ], + "type": "text", + "content": "[69] Meisam Razaviyayn, Mingyi Hong, and Zhi-Quan Luo. “A unified convergence analysis of block successive minimization methods for nonsmooth optimization”. In: SIAM Journal on Optimization 23.2 (2013), pp. 1126–1153." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "spans": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "text", + "content": "[70] Liliang Ren, Yang Liu, Yadong Lu, Yelong Shen, Chen Liang, and Weizhu Chen. \"Samba: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling\". In: arXiv preprint arXiv:2406.07522 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 241, + 486, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 241, + 486, + 252 + ], + "spans": [ + { + "bbox": [ + 70, + 241, + 486, + 252 + ], + "type": "text", + "content": "[71] Lee T Robertson. \"Memory and the brain\". In: Journal of dental education 66.1 (2002), pp. 
30-42." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 253, + 562, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 562, + 276 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 562, + 276 + ], + "type": "text", + "content": "[72] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. \"Winogrande: An adversarial winograd schema challenge at scale\". In: Communications of the ACM 64.9 (2021), pp. 99-106." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "spans": [ + { + "bbox": [ + 70, + 277, + 563, + 300 + ], + "type": "text", + "content": "[73] Imanol Schlag, Kazuki Irie, and Jürgen Schmidhuber. \"Linear transformers are secretly fast weight programmers\". In: International Conference on Machine Learning. PMLR. 2021, pp. 9355-9366." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 300, + 563, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 300, + 563, + 324 + ], + "spans": [ + { + "bbox": [ + 70, + 300, + 563, + 324 + ], + "type": "text", + "content": "[74] JH Schmidhuber. \"Learning to control fast-weight memories: An alternative to recurrent nets. Accepted for publication in\". In: Neural Computation (1992)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 324, + 563, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 563, + 360 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 563, + 360 + ], + "type": "text", + "content": "[75] Jürgen Schmidhuber. “Reducing the ratio between learning complexity and number of time varying variables in fully recurrent nets”. In: ICANN'93: Proceedings of the International Conference on Artificial Neural Networks Amsterdam, The Netherlands 13–16 September 1993 3. Springer. 1993, pp. 460–463." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 360, + 555, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 360, + 555, + 372 + ], + "spans": [ + { + "bbox": [ + 70, + 360, + 555, + 372 + ], + "type": "text", + "content": "[76] Jürgen Schmidhuber and Sepp Hochreiter. \"Long Short-term Memory\". In: Neural Computation MIT-Press (1997)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 373, + 562, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 562, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 562, + 396 + ], + "type": "text", + "content": "[77] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. \"Implicit Language Models are RNNs: Balancing Parallelization and Expressivity\". In: arXiv preprint arXiv:2502.07827 (2025)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 396, + 563, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 396, + 563, + 420 + ], + "spans": [ + { + "bbox": [ + 70, + 396, + 563, + 420 + ], + "type": "text", + "content": "[78] Shai Shalev-Shwartz et al. \"Online learning and online convex optimization\". In: Foundations and Trends® in Machine Learning 4.2 (2012), pp. 107-194." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 420, + 563, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 420, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 70, + 420, + 563, + 456 + ], + "type": "text", + "content": "[79] Julien Siems, Timur Carstensen, Arber Zela, Frank Hutter, Massimiliano Pontil, and Riccardo Grazzi. \"DeltaProduct: Increasing the Expressivity of DeltaNet Through Products of Householders\". In: arXiv preprint arXiv:2502.10297 (2025)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 456, + 563, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 456, + 563, + 491 + ], + "spans": [ + { + "bbox": [ + 70, + 456, + 563, + 491 + ], + "type": "text", + "content": "[80] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. \"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2022. URL: https://openreview.net/forum?id=Ai8Hw3AXqks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 491, + 563, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 491, + 563, + 526 + ], + "spans": [ + { + "bbox": [ + 70, + 491, + 563, + 526 + ], + "type": "text", + "content": "[81] Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. \"Simplified State Space Layers for Sequence Modeling\". In: The Eleventh International Conference on Learning Representations. 2023. URL: https://openreview.net/forum?id=Ai8Hw3AXqks." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 528, + 563, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 528, + 563, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 528, + 563, + 552 + ], + "type": "text", + "content": "[82] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. \"Rofomer: Enhanced transformer with rotary position embedding\". In: Neurocomputing 568 (2024), p. 127063." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 552, + 563, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 552, + 563, + 587 + ], + "spans": [ + { + "bbox": [ + 70, + 552, + 563, + 587 + ], + "type": "text", + "content": "[83] Yu Sun, Xinhao Li, Karan Dalal, Jiarui Xu, Arjun Vikram, Genghan Zhang, Yann Dubois, Xinlei Chen, Xiaolong Wang, Sanmi Koyejo, et al. \"Learning to (learn at test time): Rnns with expressive hidden states\". 
In: arXiv preprint arXiv:2407.04620 (2024)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 587, + 562, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 587, + 562, + 611 + ], + "spans": [ + { + "bbox": [ + 70, + 587, + 562, + 611 + ], + "type": "text", + "content": "[84] Yutao Sun, Li Dong, Shaohan Huang, Shuming Ma, Yuqing Xia, Jilong Xue, Jianyong Wang, and Furu Wei. \"Retentive network: A successor to transformer for large language models\". In: arXiv preprint arXiv:2307.08621 (2023)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 611, + 490, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 611, + 490, + 623 + ], + "spans": [ + { + "bbox": [ + 70, + 611, + 490, + 623 + ], + "type": "text", + "content": "[85] W Scott Terry. Learning and memory: Basic principles, processes, and procedures. Routledge, 2017." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 624, + 563, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 624, + 563, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 624, + 563, + 647 + ], + "type": "text", + "content": "[86] Robert Tibshirani. \"Regression shrinkage and selection via the lasso\". In: Journal of the Royal Statistical Society Series B: Statistical Methodology 58.1 (1996), pp. 267-288." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 647, + 563, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 647, + 563, + 682 + ], + "spans": [ + { + "bbox": [ + 70, + 647, + 563, + 682 + ], + "type": "text", + "content": "[87] Matteo Tiezzi, Michele Casoni, Alessandro Betti, Tommaso Guidi, Marco Gori, and Stefano Melacci. \"On the resurgence of recurrent models for long sequences: Survey and research opportunities in the transformer era\". In: arXiv preprint arXiv:2402.08132 (2024)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 683, + 563, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 683, + 563, + 719 + ], + "spans": [ + { + "bbox": [ + 70, + 683, + 563, + 719 + ], + "type": "text", + "content": "[88] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. \"Llama: Open and efficient foundation language models\". In: arXiv preprint arXiv:2302.13971 (2023)." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 564, + 456 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 564, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 564, + 120 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 564, + 120 + ], + "type": "text", + "content": "[89] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 121, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 121, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 69, + 121, + 564, + 180 + ], + "type": "text", + "content": "[90] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. \"Attention is All you Need\". In: Advances in Neural Information Processing Systems. Ed. by I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett. Vol. 30. Curran Associates, Inc., 2017. URL: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 181, + 563, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 181, + 563, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 181, + 563, + 216 + ], + "type": "text", + "content": "[91] Johannes Von Oswald, Maximilian Schlegel, Alexander Meulemans, Seijin Kobayashi, Eyvind Niklasson, Nicolas Zucchet, Nino Scherrer, Nolan Miller, Mark Sandler, Max Vlademyrov, et al. \"Uncovering mesa-optimization algorithms in transformers\". In: arXiv preprint arXiv:2309.05858 (2023)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "spans": [ + { + "bbox": [ + 70, + 217, + 563, + 240 + ], + "type": "text", + "content": "[92] Ke Alexander Wang, Jiaxin Shi, and Emily B Fox. \"Test-time regression: a unifying framework for designing sequence models with associative memory\". In: arXiv preprint arXiv:2501.12352 (2025)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "spans": [ + { + "bbox": [ + 70, + 241, + 563, + 264 + ], + "type": "text", + "content": "[93] Yingheng Wang, Zichen Wang, Gil Sadeh, Luca Zancato, Alessandro Achille, George Karypis, and Huzefa Rangwala. \"Long-context Protein Language Model\". In: bioRxiv (2024), pp. 2024-10." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 264, + 563, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 264, + 563, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 264, + 563, + 289 + ], + "type": "text", + "content": "[94] Songlin Yang, Jan Kautz, and Ali Hatamizadeh. “Gated Delta Networks: Improving Mamba2 with Delta Rule”. In: arXiv preprint arXiv:2412.06464 (2024)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 289, + 563, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 289, + 563, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 289, + 563, + 323 + ], + "type": "text", + "content": "[95] Songlin Yang, Bailin Wang, Yikang Shen, Rameswar Panda, and Yoon Kim. “Gated Linear Attention Transformers with Hardware-Efficient Training”. In: Forty-first International Conference on Machine Learning. 2024. URL: https://openreview.net/forum?id=ia5XvxFUJT." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 324, + 563, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 563, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 563, + 348 + ], + "type": "text", + "content": "[96] Songlin Yang, Bailin Wang, Yu Zhang, Yikang Shen, and Yoon Kim. \"Parallelizing linear transformers with the delta rule over sequence length\". In: Advances in Neural Information Processing Systems 37 (2024), pp. 115491-115522." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 348, + 563, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 348, + 563, + 396 + ], + "spans": [ + { + "bbox": [ + 69, + 348, + 563, + 396 + ], + "type": "text", + "content": "[97] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. \"HellaSwag: Can a Machine Really Finish Your Sentence?\" In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Ed. by Anna Korhonen, David Traum, and Lluis Marquez. Florence, Italy: Association for Computational Linguistics, July 2019, pp. 4791-4800. DOI: 10.18653/v1/P19-1472. URL: https://aclanthology.org/P19-1472/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 396, + 563, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 396, + 563, + 419 + ], + "spans": [ + { + "bbox": [ + 70, + 396, + 563, + 419 + ], + "type": "text", + "content": "[98] Biao Zhang and Rico Sennrich. \"Root mean square layer normalization\". In: Advances in Neural Information Processing Systems 32 (2019)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 419, + 563, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 419, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 563, + 456 + ], + "type": "text", + "content": "[99] Hao Zhang, Alexander C Berg, Michael Maire, and Jitendra Malik. \"SVM-KNN: Discriminative nearest neighbor classification for visual category recognition\". In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06). Vol. 2. IEEE. 2006, pp. 2126-2136." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 261, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 261, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 261, + 86 + ], + "type": "text", + "content": "A Additional Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 564, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 564, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 564, + 233 + ], + "type": "text", + "content": "Modern Linear RNNs. Recent efforts aim to overcome Transformers quadratic cost and limitations in long-context modeling by designing efficient recurrent alternatives (Tiezzi et al. 2024), mainly due to fast inference and training of such models. The first generation of models—such as RetNet (Sun et al. 2023), LRU (Orvieto et al. 2023), RWKV (Peng et al. 2023), S5 (Smith et al. 2023), and S4 (Gu et al. 2022)—uses data-independent transition matrix mechanism with Hebbian-like update rule. The second generation of such models started to incorporate input-dependent parameters into such linear architectures (e.g., Griffin (De et al. 2024), SSMs (Behrouz et al. 2024b; Dao et al. 2024; Hasani et al. 2023), RWKV6 (Peng et al. 2024)), and/or use more expressive memory updating rule based on delta rule (Liu et al. 2024a; Peng et al. 2025b; Schlag et al. 2021; Yang et al. 2024a,c). The next generation of models, extend the memory architecture to deep models, while using delta-rule-like update rule (Sun et al. 
2024), or momentum-based update rule (Behrouz et al. 2024c). Recently, to further enhance the performance of delta-rule-based sequence models, Siemens et al. (2025) suggest using multiple gradient descent update per token, resulting in more expressive sequence models in state tracking tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 238, + 564, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 238, + 564, + 288 + ], + "spans": [ + { + "bbox": [ + 67, + 238, + 564, + 288 + ], + "type": "text", + "content": "In addition to the above fast linear recurrent sequence models, several studies have focused on (interpretable) non-linear RNNs (Csordás et al. 2024; Gonzalez et al. 2024; Karami et al. 2025; Lim et al. 2024; Merrill et al. 2024; Schone et al. 2025; Von Oswald et al. 2023), and how their training can be faster (Gonzalez et al. 2024; Lim et al. 2024; Schone et al. 2025). However, due to the recurrent nature of such models, parallelizing them in larger scales is still challenging." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 296, + 564, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 296, + 564, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 296, + 564, + 370 + ], + "type": "text", + "content": "Fast Weight Programs. The idea of interpretation of linear layers as the key-value associative memory system backs to Hopfield networks (Hopfield 1982) and then fast weight programs, in which dynamic fast programs are incorporated into recurrent neural networks as writeable memory (Schlag et al. 2021; Schmidhuber 1992; Schmidhuber 1993). The two learning rules of Hebbian (Hebb 2005) and delta rule (Prados et al. 1989) are the most popular learning rules for them, which have been extensively explored in the literature (Irie et al. 2021; Munkhdalai et al. 2019, 2017; Schlag et al. 2021; Schmidhuber 1992; Yang et al. 2024a,c)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 379, + 564, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 379, + 564, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 379, + 564, + 441 + ], + "type": "text", + "content": "Test Time Training. The key ideas of learning at test time backs to early studies on local learning Bottou et al. 1992, in which each test data is trained on its neighbors before making a prediction (Gandelsman et al. 2022; Zhang et al. 2006). Later applying this idea on modern architectures, it has shown promising performance in diverse downstream tasks such as vision tasks (Jain et al. 2011; Mullapudi et al. 2019), video generation (Dalal et al. 2025), etc., mostly due to their ability to mitigate out-of-distribution samples." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 449, + 564, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 564, + 547 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 564, + 547 + ], + "type": "text", + "content": "Hopfield Networks. We build MIRAS based on the concept of associative memory in its broad form, where we learn an underlying mapping between keys and values. One of the earliest studies that discuss building neural architectures based on associative memory is Hopfield Networks (Hopfield 1982), in which associative memory is defined as the minimizing the energy function required to store keys and values. While traditional Hopfield networks has limited applicability in recent years (mainly due to limited capacity of vector-valued memory and energy function), several recent studies aim to improve their capacity by various techniques (Krotov 2021; Krotov et al. 2016; Li et al. 2024b), including extending the energy function of such models based on exponential kernels (Krotov et al. 2016; Lucibello et al. 2024), and discuss their connection to Transformers (Hu et al. 2024; Ramsauer et al. 2021)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 555, + 565, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 565, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 565, + 726 + ], + "type": "text", + "content": "Unifying Frameworks. In recent years, there have been growing efforts to understand the underlying mechanism of sequence models and unify (a subset of) them through a single perspective. Dao et al. (2024) present SSD framework to connect linear Transformers and (a subset of) linear recurrent models through the lens of associative operators and structured matrices. The SSD framework, however, is limited to models with vector or matrix-valued memory that are updated using a Hebbian-like update rules. Later, Liu et al. (2024a) present an online learning perspective on (a subset of) linear recurrent models. While this framework can also explain more expressive recurrent models based on delta rule, it is limited to online learners (i.e., models that optimize their internal associative memory using stochastic optimizers, such as stochastic gradient descent) with matrix-valued memory. Several modern sequence models, such as Transformers (Vaswani et al. 2017b) or Titans (Behrouz et al. 2024c) cannot be expressed in this framework. Sun et al. (2024) further provide a unifying perspective on how linear and softmax attention are respectively parametric and non-parametric solutions of (kernel) regression loss but consider other modern linear RNNs outside of this class of models, mainly due to limiting the objective to be regression loss. Recently, in a concurrent work to ours, Wang et al. (2025) also force models to have the same attentional bias objective and show that with additional simplification of modern RNNs (e.g., RetNet (Sun et al. 2023), Mamba (Dao et al. 2024)) they approximately place in the same class of models that internally optimize regression loss." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 310, + 742, + 322, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 742, + 322, + 752 + ], + "spans": [ + { + "bbox": [ + 310, + 742, + 322, + 752 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 564, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 564, + 133 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 564, + 133 + ], + "type": "text", + "content": "However, this simplification, fully changes the understanding of underlying update rules in these models. For example, contrary to Wang et al. (2025), MIRAS can distinguish models with Hebbian-like update (with dot product similarity) and delta rule update (with regression loss). Furthermore, all presented sequence models in this work (e.g., MONETA, MEMORA, YAAD) as well as models like HGRN2 (Qin et al. 2024) are placed outside of this class of models, due to their different attentional bias." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 151, + 251, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 151, + 251, + 167 + ], + "spans": [ + { + "bbox": [ + 69, + 151, + 251, + 167 + ], + "type": "text", + "content": "B Proof of Proposition 3.2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 175, + 523, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 175, + 523, + 187 + ], + "spans": [ + { + "bbox": [ + 68, + 175, + 523, + 187 + ], + "type": "text", + "content": "Here we present the proof of Proposition 3.2. For the sake of completeness, let us first re-state this Proposition." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "spans": [ + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": "Proposition 3.2. Let " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "\\eta_t = \\eta" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " and define " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "h_t(W) \\coloneqq \\sum_{i=1}^{t-1} \\widehat{\\ell}_i(W; \\mathbf{k}_i, \\mathbf{v}_i) + \\frac{1}{\\eta} R(W)" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": ". Assume " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "\\mathcal{W} = \\mathbb{R}^d" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " and the function " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "h_t(W)" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " is strictly convex in " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " and let " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_h(\\cdot, \\cdot)" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " be the Bregman divergence defined by function " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "h(\\cdot)" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": 
"\\mathcal{D}_h(W, W') = h(W) - h(W') - \\langle \\nabla h(W'), W - W' \\rangle" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": ". Set " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "\\mathrm{Ret}_t(W, W') = \\mathcal{D}_h(W, W')" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "inline_equation", + "content": "\\widetilde{\\ell}_t(W; x_t) = \\widehat{\\ell}_t(W; x_t)" + }, + { + "bbox": [ + 68, + 192, + 563, + 243 + ], + "type": "text", + "content": " in (Learning-Retaining Viewpoint). Then, the update rule in (Learning-Retaining Viewpoint) is equivalent to the update rule in (FTRL Viewpoint)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "spans": [ + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "text", + "content": "Proof. Let " + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "inline_equation", + "content": "\\{\\widehat{W}_1, \\widehat{W}_2, \\ldots\\}" + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "text", + "content": " be the sequence of parameters obtained by (FTRL Viewpoint) and " + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "inline_equation", + "content": "\\{\\widetilde{W}_1, \\widetilde{W}_2, \\ldots\\}" + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "text", + "content": " be the sequence of parameters obtained by (Learning-Retaining Viewpoint). To show both update rules are equivalent, it suffices to show that the above two sequences are the same if they are initialized at the same point. We prove this statement by induction. First of all, since both sequences are initialized at the same point, the induction base is satisfied (i.e. 
" + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "inline_equation", + "content": "\\widetilde{W}_1 = \\widehat{W}_1" + }, + { + "bbox": [ + 68, + 253, + 563, + 313 + ], + "type": "text", + "content": "). Now, assume by induction hypothesis that" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 288, + 312, + 563, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 312, + 563, + 326 + ], + "spans": [ + { + "bbox": [ + 288, + 312, + 563, + 326 + ], + "type": "interline_equation", + "content": "\\widetilde {W} _ {t - 1} = \\widehat {W} _ {t - 1}. \\tag {33}", + "image_path": "5b943db11a74fd598a122c80f5abbba22c83b0d912d8b35b2bceeaf78cbd0309.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 329, + 561, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 329, + 561, + 352 + ], + "spans": [ + { + "bbox": [ + 68, + 329, + 561, + 352 + ], + "type": "text", + "content": "To complete the induction, we need to show " + }, + { + "bbox": [ + 68, + 329, + 561, + 352 + ], + "type": "inline_equation", + "content": "\\widetilde{W}_t = \\widehat{W}_t" + }, + { + "bbox": [ + 68, + 329, + 561, + 352 + ], + "type": "text", + "content": ". 
To this end, notice that, by (Learning-Retaining Viewpoint), we have" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 223, + 352, + 408, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 352, + 408, + 370 + ], + "spans": [ + { + "bbox": [ + 223, + 352, + 408, + 370 + ], + "type": "interline_equation", + "content": "\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widetilde {\\ell} _ {t} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\operatorname {R e t} _ {t} (W, \\widetilde {W} _ {t - 1})", + "image_path": "ba50f238ce5f33290fa7048c1ffda6bc107d7d5d876a41aa2487a777d0ad0ae7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 373, + 467, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 373, + 467, + 385 + ], + "spans": [ + { + "bbox": [ + 69, + 373, + 467, + 385 + ], + "type": "text", + "content": "Using the choice of the Attentional Bias and the Retention function in the Proposition, we obtain" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 153, + 392, + 563, + 458 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 392, + 563, + 458 + ], + "spans": [ + { + "bbox": [ + 153, + 392, + 563, + 458 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) - \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) \\tag {34} \\\\ - \\frac {1}{\\eta} R (\\widetilde {W} _ {t - 1}) - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widetilde {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widetilde {W} _ {t - 1}), W - \\widetilde {W} _ {t - 1} \\right\\rangle . 
\\\\ \\end{array}", + "image_path": "46351f9f08e7407d5c80ee9f0ffc73c06e03ccc5f3563dfd30ce8a79ae29a1b7.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 463, + 381, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 463, + 381, + 475 + ], + "spans": [ + { + "bbox": [ + 69, + 463, + 381, + 475 + ], + "type": "text", + "content": "Ignoring the constant terms and using the induction hypothesis (33), we get" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 179, + 481, + 563, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 481, + 563, + 548 + ], + "spans": [ + { + "bbox": [ + 179, + 481, + 563, + 548 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\widehat {\\ell_ {t}} (W, \\mathbf {k} _ {t}, \\mathbf {v} _ {t}) + \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W) \\tag {35} \\\\ - \\left\\langle \\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} (\\widehat {W} _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\nabla R (\\widehat {W} _ {t - 1}), W - \\widehat {W} _ {t - 1} \\right\\rangle . \\\\ \\end{array}", + "image_path": "680672e1480e5ce8fc52ca864b5c92b471c769c034455b7a0f12484f7a574c70.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 553, + 463, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 553, + 463, + 567 + ], + "spans": [ + { + "bbox": [ + 68, + 553, + 463, + 567 + ], + "type": "text", + "content": "On the other hand, recall that " + }, + { + "bbox": [ + 68, + 553, + 463, + 567 + ], + "type": "inline_equation", + "content": "\\{\\widehat{W}_1,\\widehat{W}_2,\\ldots \\}" + }, + { + "bbox": [ + 68, + 553, + 463, + 567 + ], + "type": "text", + "content": " is obtained by (FTRL Viewpoint). 
Therefore, we have" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 222, + 573, + 409, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 573, + 409, + 604 + ], + "spans": [ + { + "bbox": [ + 222, + 573, + 409, + 604 + ], + "type": "interline_equation", + "content": "\\widehat {W} _ {t - 1} = \\arg \\min _ {W} \\sum_ {i = 1} ^ {t - 1} \\widehat {\\ell_ {i}} (W; \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} \\mathcal {R} _ {t} (W).", + "image_path": "0a9395ce519dd9a000f0c09fac3e6abdff1a487612ca4195b36605a37a2e84ac.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 609, + 131, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 609, + 131, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 609, + 131, + 620 + ], + "type": "text", + "content": "Thus, we have" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 236, + 620, + 563, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 620, + 563, + 651 + ], + "spans": [ + { + "bbox": [ + 236, + 620, + 563, + 651 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {t - 1} \\nabla \\widehat {\\ell_ {i}} \\left(W _ {t - 1}, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}\\right) + \\frac {1}{\\eta} \\nabla R \\left(W _ {t - 1}\\right) = 0. 
\\tag {36}", + "image_path": "64cf270ae5ece253cd40baa97d7402eff0854d77b5d2ddd359c436d288230b19.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 654, + 215, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 215, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 215, + 666 + ], + "type": "text", + "content": "Combining (36) and (35), we obtain" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 228, + 673, + 402, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 673, + 402, + 703 + ], + "spans": [ + { + "bbox": [ + 228, + 673, + 402, + 703 + ], + "type": "interline_equation", + "content": "\\widetilde {W} _ {t} = \\arg \\min _ {W} \\quad \\sum_ {i = 1} ^ {t} \\widehat {\\ell_ {i}} (W, \\mathbf {k} _ {i}, \\mathbf {v} _ {i}) + \\frac {1}{\\eta} R (W).", + "image_path": "c52a53d3147a32bad3e24c86f4cf922d7462dfb8a43a44f7ab5bc53d1e1fcd3f.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 710, + 273, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 710, + 273, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 710, + 273, + 723 + ], + "type": "text", + "content": "This implies " + }, + { + "bbox": [ + 69, + 710, + 273, + 723 + ], + "type": "inline_equation", + "content": "\\widetilde{W}_t = \\widehat{W}_t" + }, + { + "bbox": [ + 69, + 710, + 273, + 723 + ], + "type": "text", + "content": ", which completes the proof." 
+ } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 553, + 713, + 561, + 721 + ], + "blocks": [ + { + "bbox": [ + 553, + 713, + 561, + 721 + ], + "lines": [ + { + "bbox": [ + 553, + 713, + 561, + 721 + ], + "spans": [ + { + "bbox": [ + 553, + 713, + 561, + 721 + ], + "type": "image", + "image_path": "3329723ab8e514721e400978c83f650752b2cf21d4fc36cede7c1a33e9e7b66c.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 310, + 742, + 321, + 751 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 226, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 226, + 88 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 226, + 88 + ], + "type": "text", + "content": "C Experimental Setup" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 95, + 564, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 564, + 168 + ], + "type": "text", + "content": "We perform experimental evaluation on the language modeling (Merit et al. 2017; Paperno et al. 2016), common-sense reasoning (Bisk et al. 2020; Clark et al. 2019; Clark et al. 2018; Sakaguchi et al. 2021; Zellers et al. 2019), and long context needle-in-haystack tasks (Hsieh et al. 2024). We compare our models with the state-of-the-art linear recurrent models, Transformers, and hybrid models (recurrent + attention). More specifically we compare with Transformer++ (Touvron et al. 2023), RetNet (Sun et al. 2023), Gated Linear Attention (GLA) (Yang et al. 2024b), Mamba (Gu et al. 
2024), Mamba2 (Dao et al. 2024), DeltaNet (Yang et al. 2024c), TTT (Sun et al. 2024), and Gated DeltaNet (Yang et al. 2024a)." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 195, + 198, + 438, + 267 + ], + "blocks": [ + { + "bbox": [ + 252, + 178, + 378, + 189 + ], + "lines": [ + { + "bbox": [ + 252, + 178, + 378, + 189 + ], + "spans": [ + { + "bbox": [ + 252, + 178, + 378, + 189 + ], + "type": "text", + "content": "Table 5: Architectural Details." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 195, + 198, + 438, + 267 + ], + "lines": [ + { + "bbox": [ + 195, + 198, + 438, + 267 + ], + "spans": [ + { + "bbox": [ + 195, + 198, + 438, + 267 + ], + "type": "table", + "html": "
ModelBlockDimHeadPeak LRToken
170M12768163e-315B
340M241024161.5e-315B
780M241536161.25e-330B
", + "image_path": "45c18238da20b86ece6c49f73c0da7ab6603bbe33993974acf302e629ba56a20.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "spans": [ + { + "bbox": [ + 311, + 742, + 321, + 751 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_content_list.json b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2a257735ec14675d632df9c9c052b5eda4af5d78 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_content_list.json @@ -0,0 +1,2687 @@ +[ + { + "type": "text", + "text": "Novel Demonstration Generation with Gaussian Splitting Enables Robust One-Shot Manipulation", + "text_level": 1, + "bbox": [ + 101, + 71, + 895, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sizhe Yang\\*,1,2 Wenye $\\mathrm{Yu}^{*,1,3}$ Jia Zeng $^{1}$ Jun Lv $^{3}$ Kerui Ren $^{1,3}$", + "bbox": [ + 233, + 157, + 754, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cewu Lu $^{3}$ Dahua Lin $^{1,2}$ Jiangmiao Pang $^{1,\\dagger}$", + "bbox": [ + 321, + 176, + 666, + 193 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai AI Laboratory $^{2}$ The Chinese University of Hong Kong", + "bbox": [ + 243, + 193, + 748, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Shanghai Jiao Tong University", + "bbox": [ + 377, + 210, + 612, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "* Equal 
contributions † Corresponding author", + "bbox": [ + 320, + 227, + 676, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project page: https://yangsizhe.github.io/robosplat/", + "bbox": [ + 307, + 244, + 678, + 261 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg", + "image_caption": [ + "Novel Demonstration Generation" + ], + "image_footnote": [], + "bbox": [ + 81, + 284, + 483, + 382 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 405, + 205, + 482 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg", + "image_caption": [ + "Fig. 1: Starting from a single expert demonstration and multi-view images, our method generates diverse and visually realistic data for policy learning, enabling robust performance across six types of generalization in the real world. Compared to previous 2D data augmentation methods, our approach achieves significantly better results across various generalization types. Notably, we achieve this within a unified framework." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 483, + 205, + 556 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 215, + 406, + 326, + 481 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 215, + 482, + 326, + 556 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 405, + 447, + 481 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 482, + 447, + 556 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg", + "image_caption": [ + "Generalization" + ], + "image_footnote": [], + "bbox": [ + 486, + 300, + 620, + 378 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 378, + 619, + 455 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg", + "image_caption": [ + "Training Data Source:" + ], + "image_footnote": [], + "bbox": [ + 486, + 457, + 620, + 535 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 325, + 689, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + 
"img_path": "images/6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 407, + 694, + 444 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 300, + 833, + 378 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 378, + 833, + 455 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 459, + 694, + 523 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg", + "image_caption": [ + "Manually Collected + Previous 2D Augmentation" + ], + "image_footnote": [], + "bbox": [ + 697, + 458, + 833, + 534 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 839, + 325, + 908, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 839, + 407, + 908, + 444 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg", + "image_caption": [ + "Manually Collected Ours (Generated)" + ], + "image_footnote": [], + "bbox": [ + 839, + 483, + 908, + 523 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Visuomotor policies learned from teleoperated demonstrations 
face challenges such as lengthy data collection, high costs, and limited data diversity. Existing approaches address these issues by augmenting image observations in RGB space or employing Real-to-Sim-to-Real pipelines based on physical simulators. However, the former is constrained to 2D data augmentation, while the latter suffers from imprecise physical simulation caused by inaccurate geometric reconstruction. This paper introduces RoboSplat, a novel method that generates diverse, visually realistic demonstrations by directly manipulating 3D Gaussians. Specifically, we reconstruct the scene through 3D Gaussian Splatting (3DGS), directly edit the reconstructed scene, and augment data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types. Comprehensive real-world experiments demonstrate that RoboSplat significantly enhances the generalization of visuomotor policies under diverse disturbances. Notably, while policies trained on hundreds of real-world demonstrations with additional", + "bbox": [ + 73, + 635, + 491, + 912 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2D data augmentation achieve an average success rate of $57.2\\%$ , RoboSplat attains $87.8\\%$ in one-shot settings across six types of generalization in the real world.", + "bbox": [ + 503, + 635, + 921, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 645, + 685, + 779, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Imitation learning for visuomotor policies has emerged as a promising paradigm in robot manipulation. 
However, policies learned through imitation often display limited robustness in deployment scenarios that differ substantially from expert demonstrations, primarily due to insufficient coverage of visual domains in the training data. Increasing the volume and diversity of real-world data is an effective strategy for enhancing robustness [12]; however, acquiring human-collected demonstrations is prohibitively time-consuming and labor-intensive. Consequently, substantial efforts have been devoted to generating diverse expert data without engaging with real-world environments [68, 69, 49, 8, 10, 67, 9, 35, 50, 59].", + "bbox": [ + 501, + 704, + 919, + 885 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Simulated environments offer a low-cost platform for data synthesis [49, 69]. However, the Sim-to-Real gap presents", + "bbox": [ + 503, + 886, + 921, + 917 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13175v1 [cs.RO] 17 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "significant challenges that hinder policy performance in real-world scenarios. Although Real-to-Sim-to-Real pipelines can narrow this gap considerably, replicating real-world manipulation scenes in simulation remains complex and labor-intensive. In particular, inaccuracies in geometric reconstructions often lead to imprecise physical simulations. Moreover, existing Real-to-Sim-to-Real approaches primarily generate data within monotonously reconstructed scenes, resulting in policies that are tailored only to those specific environments. Another line of work sheds light on augmenting image observations for better visual generalization. By editing different semantic parts of the image, these approaches generate novel scene configurations, in terms of background appearances [68, 9, 67, 10], embodiment types [8], object types [67], and camera views [50]. 
While these image augmentation methods are convenient, their limited consideration of 3D spatial information results in spatially inaccurate data generation. For more effective data augmentation, explicit 3D representations that retain accurate spatial information and are realistically renderable are required.", + "bbox": [ + 76, + 70, + 488, + 372 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, 3D Gaussian Splatting (3DGS) [25] has become a burgeoning approach to superior reconstruction and rendering. Thanks to its explicit representation of the scene, 3DGS enables interpretable editing of the reconstructed scene, which paves the way for generating novel manipulation configurations. Furthermore, as a 3D representation of the scene, 3DGS retains spatial information from the real world and allows for consistent rendering from multiple perspectives, which makes it the real-world counterpart of a simulator's graphics engine for generating novel demonstrations.", + "bbox": [ + 76, + 372, + 488, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on that, we propose RoboSplat, a novel and efficacious approach to demonstration generation with Gaussian Splitting. Empowered by 3DGS, we achieve a high-fidelity reconstruction of the manipulation scene. In order to align the reconstructed scene with real-world counterparts, we devise a novel frame alignment pipeline leveraging differentiable rendering of Gaussian Splitting. 3D Gaussians of different scene components are segmented using off-the-shelf segmentation models and the robot United Robotics Description Format (URDF). Remarkably, as illustrated in Fig. 1, a single collected expert trajectory enables us to generate novel demonstrations across a wide range of visual domains. 
To be specific, RoboSplat augments data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types.", + "bbox": [ + 76, + 523, + 488, + 810 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Compared to previous Real-to-Sim-to-Real and image augmentation approaches, RoboSplat achieves more diverse and spatially accurate data generation. Extensive real-world experiments demonstrate that RoboSplat significantly enhances the robustness of visuomotor policies against multiple disturbances across tasks involving pick and place, tool use, functional motion, articulated object manipulation, and long", + "bbox": [ + 76, + 811, + 488, + 917 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "horizon skills. Specifically, compared to policies trained on hundreds of real-world demonstrations that are further enriched with 2D data augmentation, our method increases the average success rate from $57.2\\%$ to $87.8\\%$ .", + "bbox": [ + 506, + 71, + 919, + 130 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. RELATED WORK", + "text_level": 1, + "bbox": [ + 645, + 147, + 782, + 160 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Generalizable Policy in Robot Manipulation", + "text_level": 1, + "bbox": [ + 506, + 171, + 826, + 186 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in manipulation have significantly enhanced generalization. Some studies design the policy architecture to endow it with equivariant properties, which is helpful to generalizing to different object poses [60, 61, 43, 13]. 
One-shot imitation learning approaches like [54, 48, 6, 53, 70] enable the policy to handle various object poses given only one demonstration. Furthermore, some other work focuses on generalizing the policy to different camera views [69, 46, 63], scene appearance [30, 51], and embodiments [12]. Some studies exploit the power of Large Language Models (LLMs) and Vision Language Models (VLMs) to endow robots with generalization abilities [23, 7, 39, 14]. Instead of adopting generalizable policy architecture, auxiliary learning objectives and powerful foundation models, our work is concentrated on generating high-quality, diverse, and realistic data to instill generalization abilities to the learned policy.",
    "bbox": [
      506,
      193,
      919,
      434
    ],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "B. Data Augmentation for Policy Learning",
    "text_level": 1,
    "bbox": [
      506,
      450,
      795,
      464
    ],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Given limited training data, data augmentation emerges as a way to improve the robustness of the policy. Previous work adopts image augmentation techniques to improve the resistance of visuomotor policies to observation noises [29, 28, 36, 37, 15, 19, 20]. However, these methods are mainly evaluated in simulated environments. To deploy learned policies in real-world settings, some previous work focuses on augmenting the appearance of the scene by incorporating image-inpainting models [67, 10, 9, 35]. Moreover, Tian et al. [50] generate augmented task demonstrations from different camera views and aim to learn a view-invariant policy. Ameperosa et al. [3]. Chen et al. [8] further devise a cross-embodiment pipeline by inpainting different robots to image observations. Nonetheless, these studies mainly augment task demonstrations on 2D images, which lack spatial information. 
Hence, only limited augmentation can be achieved, and the augmented demonstrations might be unrealistic compared to those generated directly from 3D representations. Our work reconstructs the scene with 3D Gaussian Splatting and edits the 3D representation for data augmentation, enabling our policy to achieve comprehensive generalization across object poses, object types, camera views, lighting conditions, scene appearance, and various embodiments.",
    "bbox": [
      506,
      472,
      919,
      816
    ],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "C. Gaussian Splatting in Robotics",
    "text_level": 1,
    "bbox": [
      506,
      834,
      738,
      848
    ],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "3D Gaussian Splatting (3DGS) [25] serves as an explicit radiance field representation for real-time rendering of 3D scenes. Previous work leverages 3DGS to select proper grasp poses [24, 71]. Furthermore, Lu et al. [34] exploit 3DGS to",
    "bbox": [
      506,
      856,
      919,
      917
    ],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [
      89,
      68,
      697,
      252
    ],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg",
    "image_caption": [
      "Fig. 2: Method overview. We start from a single manually collected demonstration and multi-view images that capture the whole scene. The former provides task-related keyframes, while the latter helps scene reconstruction. After aligning the reconstructed frame with the real-world frame and segmenting different scene components, we carry out autonomous editing of the scene in pursuit of six types of augmentation." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 253, + 697, + 404 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 73, + 903, + 404 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "construct dynamics of the scene for multi-task robot manipulation. In order to predict the consequence of robots' interactions with the environment, Shorinwa et al. [47] leverage 3D semantic masking and infilling to visualize the motions of the objects that result from the interactions. Another line of work adopts the Real-to-Sim-to-Real pipeline, and utilizes 3DGS to reconstruct the real-world scene [31, 40, 56, 52]. However, importing reconstructed real-world objects to simulation is a strenuous process, and physical interactions tend to suffer from large sim-to-real gaps due to the flawed geometric reconstruction and lack of physical information in 3D reconstruction. Some recent work on 3DGS is centered around editing and relighting of the scene [65, 32, 17]. Our method enables autonomous editing of the reconstructed scene to generate diverse demonstrations with various configurations.", + "bbox": [ + 73, + 489, + 490, + 717 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. PRELIMINARIES", + "text_level": 1, + "bbox": [ + 209, + 728, + 356, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Gaussian Splatting (3DGS) [25] utilizes multi-view images for high-fidelity scene reconstruction. 
The scene is represented by a set of Gaussians $\{g_i\}_{i=1}^N$ , where each Gaussian $g_i$ consists of a position vector $\mu_i \in \mathbb{R}^3$ , a rotation matrix $R_i \in \mathbb{R}^{3 \times 3}$ , a scaling matrix $S_i = \text{diag}(s)(s \in \mathbb{R}^3)$ , an opacity factor $\alpha_i \in \mathbb{R}$ , and spherical harmonic coefficients $c_i$ that encapsulate the view-dependent color appearance of the Gaussian. Given the scaling matrix and rotation matrix, the covariance matrix $\Sigma_i$ is calculated as follows:",
    "bbox": [
      73,
      750,
      490,
      885
    ],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\Sigma_ {i} = R _ {i} S _ {i} S _ {i} ^ {\\top} R _ {i} ^ {\\top}.\n$$\n",
    "text_format": "latex",
    "bbox": [
      218,
      898,
      346,
      917
    ],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "To derive the color $C$ of a particular pixel during rendering procedure, 3DGS exploits a typical neural point-based approach, similar to Kopanas et al. [27], where the final color value is calculated as follows:",
    "bbox": [
      503,
      489,
      921,
      550
    ],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\nC = \\sum_ {i = 1} ^ {N} c _ {i} o _ {i} \\prod_ {j = 1} ^ {i - 1} (1 - o _ {j}),\n$$\n",
    "text_format": "latex",
    "bbox": [
      616,
      569,
      805,
      609
    ],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\no _ {i} = \\alpha_ {i} \\cdot \\exp \\left(- \\frac {1}{2} \\delta_ {i} ^ {\\intercal} \\Sigma_ {i, 2 D} ^ {- 1} \\delta_ {i}\\right),\n$$\n",
    "text_format": "latex",
    "bbox": [
      617,
      612,
      807,
      641
    ],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where $N$ is the number of Gaussians that overlap with the pixel. Besides, $\\alpha_{i}$ denotes the opacity of the $i$ -th Gaussian. $\\delta_{i} \\in \\mathbb{R}^{2}$ denotes the offset between the current pixel and the center of the $i$ -th Gaussian projected to 2D image. 
$\\Sigma_{i,2D} \\in \\mathbb{R}^{2 \\times 2}$ stands for the covariance matrix of the $i$ -th Gaussian projected to 2D image.", + "bbox": [ + 503, + 645, + 921, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IV. METHODOLOGY", + "text_level": 1, + "bbox": [ + 638, + 744, + 785, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To generate high-fidelity and diverse data from a single expert trajectory, we present RoboSplat, a novel demonstration generation approach based on 3DGS. An overview of our method is shown in Fig. 2. In this section, we describe RoboSplat in detail. We begin with the process of reconstruction and preprocessing in Sec. IV-A, which includes object and scene reconstruction, frame alignment with differentiable rendering, and novel pose generation for the robot and objects. With all the Gaussian models ready, we generate novel demonstrations and perform data augmentation in terms of object", + "bbox": [ + 501, + 765, + 921, + 917 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "poses, object types, camera views, scene appearance, lighting conditions, and embodiments, as described in Sec. IV-B. Finally, a visuomotor policy is trained on the augmented demonstrations and directly deployed on real robots, as detailed in Sec. IV-C.", + "bbox": [ + 73, + 71, + 491, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Reconstruction and Preprocessing", + "text_level": 1, + "bbox": [ + 73, + 157, + 331, + 172 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In pursuit of a high-fidelity reconstruction of the scene, we first capture a set of RGB images whose corresponding viewpoints should be as various as possible. During this process, the scene remains static and the robot is fixed at its default joint configuration, which we refer to as $q_{\\mathrm{default}}$ . 
With the images ready, we utilize COLMAP [45, 44] to obtain a sparse scene reconstruction and an estimation of the camera pose corresponding to each image. To further enhance the reconstruction precision, we gain an depth estimation for each image with Depth Anything [62]. The images, camera poses, and depth prior serve as inputs to 3DGS [25], which returns 3D Gaussians representing the entire scene $\\mathcal{G}_{\\mathrm{scene}}$ , which contains 3D Gaussians corresponding to the robot, dubbed $\\mathcal{G}_{\\mathrm{robot}}$ .", + "bbox": [ + 73, + 176, + 491, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, the reconstructed 3D Gaussians of the robot are represented in an arbitrary frame $\\mathcal{F}_{\\mathrm{scene}}$ , and hence we need to align it with the real-world coordinate frame $\\mathcal{F}_{\\mathrm{real}}$ to facilitate automated editing.", + "bbox": [ + 73, + 375, + 491, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The robot URDF gives us access to the robot base frame $\\mathcal{F}_{\\mathrm{URDF}}$ . The real-world robot frame $\\mathcal{F}_{\\mathrm{robot}}$ , $\\mathcal{F}_{\\mathrm{URDF}}$ , and $\\mathcal{F}_{\\mathrm{real}}$ are all aligned with each other. Hence, the actual problem turns into the frame alignment from $\\mathcal{F}_{\\mathrm{scene}}$ to $\\mathcal{F}_{\\mathrm{URDF}}$ . We denote the transformation matrix as $\\mathcal{T}_{\\mathrm{URDF, scene}}$ . While point cloud registration approaches, such as Iterative Closest Point (ICP) [5], serve as a common solution to it, we find that there is still major misalignment between the two frames aligned with point cloud registration, as illustrated in Fig. 3. The reason lies in the fact that point cloud registration is based on point coordinates, whereas 3D Gaussians have a scale attribute, which causes a mismatch between point coordinates and the appearance. 
Therefore, we exploit the differentiable rendering of 3DGS to do further fine-grained alignment, as depicted in Fig. 4.",
    "bbox": [
      73,
      435,
      491,
      660
    ],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Suppose $\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}$ is the initial transformation matrix obtained through ICP. We first apply $\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}$ to $\\mathcal{G}_{\\mathrm{robot}}$ leading to a partially aligned robot Gaussian $\\hat{\\mathcal{G}}_{\\mathrm{robot}}$ . The aim of further alignment is to derive another transformation matrix $\\hat{\\mathcal{T}}_{\\mathrm{rel}}$ , such that applying $\\hat{\\mathcal{T}}_{\\mathrm{rel}}$ to $\\hat{\\mathcal{G}}_{\\mathrm{robot}}$ gives a better alignment to the pose of the robot defined in URDF. For this sake, we select $N$ canonical camera views to capture the segmentation masks $\\{\\mathcal{I}_i^{\\mathrm{URDF}}\\}_{i = 1}^N$ and $\\{\\mathcal{I}_i^{\\mathrm{Gaussian}}\\}_{i = 1}^N$ (the pixel value is 1 if it belongs to the robot; otherwise, it is 0) with the robot URDF and $\\hat{\\mathcal{G}}_{\\mathrm{robot}}$ respectively. The pixel-wise differences between the images from the same canonical views are averaged to form the objective function of alignment:",
    "bbox": [
      73,
      661,
      491,
      845
    ],
    "page_idx": 3
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal {L} _ {\\text {align}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\mathcal {I} _ {i} ^ {\\text {URDF}} - \\mathcal {I} _ {i} ^ {\\text {Gaussian}}\\right) ^ {2}.\n$$\n",
    "text_format": "latex",
    "bbox": [
      160,
      854,
      406,
      896
    ],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Due to the differentiability of Gaussian Splatting, we can",
    "bbox": [
      91,
      901,
      491,
      917
    ],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg",
    "image_caption": [
      "Fig. 
3: Comparison of frame alignment results between ICP and fine-grained optimization with differentiable rendering. The semi-transparent orange overlay represents the ground truth rendered with URDF from the same camera view. The left shows the results of ICP, which have larger errors, while the right shows the results after further fine-grained optimization using differentiable rendering." + ], + "image_footnote": [], + "bbox": [ + 563, + 64, + 870, + 191 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg", + "image_caption": [ + "Fig. 4: Illustration of frame alignment with differentiable rendering. The loss is calculated between the mask rendered using Gaussian Splatting and the mask rendered with URDF. Subsequently, backpropagation and gradient descent are used to optimize the translation, rotation, and scale, which are then applied to the 3D Gaussians." + ], + "image_footnote": [], + "bbox": [ + 506, + 319, + 919, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "rewrite the objective function as $\\mathcal{L}_{\\mathrm{align}}(\\hat{T}_{\\mathrm{rel}})$ and optimize $\\hat{T}_{\\mathrm{rel}}$ through gradient descent. The optimized $\\hat{T}_{\\mathrm{rel}}$ is composed with $\\hat{T}_{\\mathrm{URDF, scene}}^{0}$ , the result of which is applied to $\\mathcal{G}_{\\mathrm{scene}}$ to form the scene reconstruction in $\\mathcal{F}_{\\mathrm{real}}$ . We refer to the aligned 3D Gaussians as $\\mathcal{G}_{\\mathrm{scene}}^{*}$ .", + "bbox": [ + 503, + 626, + 919, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to decompose the scene into different parts, we first leverage Grounded-SAM [41] to perform task-related object segmentation. Then, the masked images are used to reconstruct 3D Gaussians for the objects. 
The 3D Gaussians corresponding to each link of the robot are segmented using the point cloud of each link in $\\mathcal{F}_{\\mathrm{URDF}}$ , which can be obtained with the robot's URDF and the renderer. Specifically, if the position of a 3D Gaussian is within a threshold distance from the point cloud of a link, the 3D Gaussian is assigned to that link. If a 3D Gaussian does not belong to any object or any link of the robot, it is classified as background. We suppose that the robot has $l$ links and there are totally $k$ objects in the scene. The reconstructed robot links, objects, and background are denoted as $\\mathcal{G}_{\\mathrm{robot}}^* = \\{\\mathcal{G}_{\\mathrm{robot},i}^*\\}_{i=1}^l$ , $\\mathcal{G}_{\\mathrm{obj}}^* = \\{\\mathcal{G}_{\\mathrm{obj},j}^*\\}_{j=1}^k$ , and $\\mathcal{G}_{\\mathrm{bg}}^*$", + "bbox": [ + 501, + 704, + 921, + 919 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "respectively.", + "bbox": [ + 73, + 71, + 161, + 85 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to our frame alignment strategy, we utilize differentiable rendering to estimate the deployed camera poses in order to narrow the gap between the generated data and the deployment environment. 
The camera extrinsics are optimized through gradient descent, with the optimization objective:", + "bbox": [ + 73, + 85, + 491, + 161 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {c a m e r a}} = S S I M \\left(\\mathcal {I} _ {\\text {E x p e r t}}, \\mathcal {I} _ {\\text {G a u s s i a n}}\\right) ^ {2},\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 176, + 406, + 194 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{I}_{\\mathrm{Expert}}$ denotes the image obtained from the collected expert demonstration, $\\mathcal{I}_{\\mathrm{Gaussian}}$ represents the rendered image with reconstructed 3D Gaussians, and SSIM refers to Structural Similarity, which measures the perceptual similarity between two images.", + "bbox": [ + 73, + 200, + 490, + 276 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nonetheless, before moving on to novel demonstration generation, we need to figure out how to generate 3D Gaussians for the robot under novel joint configurations. To achieve that, we leverage the link-wise Gaussians $\\{\\mathcal{G}_{\\mathrm{robot},i}^{*}\\}_{i = 1}^{l}$ and the default joint configuration $q_{\\mathrm{default}}$ . For each link $1 \\leqslant i \\leqslant l$ , we access its relative pose to robot base frame under arbitrary joint configuration $q$ through forward kinematics, denoted as $\\mathcal{T}_{\\mathrm{fk}}^i(q)$ . Hence, by transforming each link $i$ with $\\mathcal{T}_{\\mathrm{fk}}^i(q)\\mathcal{T}_{\\mathrm{fk}}^i(q_{\\mathrm{default}})^{-1}$ , we derive the corresponding 3D Gaussians under configuration $q$ . The entire 3D Gaussians are thereby derived by composing Gaussians of all $l$ links. As for the manipulated objects, we apply transformations in a similar manner. The way 3D Gaussians are transformed is detailed in Appendix A.", + "bbox": [ + 73, + 277, + 491, + 474 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. 
Novel Demonstration Generation", + "text_level": 1, + "bbox": [ + 73, + 486, + 323, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Utilizing 3D Gaussians in $\\mathcal{F}_{\\mathrm{real}}$ , we implement our demonstration augmentation process, which systematically enhances the expert demonstration $\\mathcal{D}_{\\mathrm{expert}}$ across six aspects: object poses, object types, camera views, embodiment types, scene appearance, and lighting conditions.", + "bbox": [ + 73, + 506, + 490, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1) Object Pose", + "text_level": 1, + "bbox": [ + 93, + 583, + 199, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To perform object pose augmentation, we first extract keyframes from the expert demonstration using a heuristic approach. Whenever the gripper action toggles or joint velocities approach zero, we consider the current time step as a keyframe and record the end-effector pose with respect to robot base frame. After that, we apply rigid transformations to the target objects that are involved in the expert demonstration. The end-effector poses at keyframes are transformed equivariantly according to the target object. Eventually, we generate trajectories between consecutive keyframe poses with motion planning, the combination of which makes a complete augmented demonstration with novel object poses.", + "bbox": [ + 73, + 598, + 490, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2) Object Type", + "text_level": 1, + "bbox": [ + 89, + 780, + 199, + 795 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The object types can be augmented with 3D Content Generation. We first prompt GPT-4 [2] to generate approximately 50 names of objects that can be grasped. Then, we use these object names as prompts to generate corresponding 3D Gaussians with a 3D content generation model [57]. 
We utilize an off-the-shelf grasping algorithm [16] to generate grasp poses with respect to the object frame. As we generate different object poses for augmentation, we obtain the corresponding", + "bbox": [ + 73, + 795, + 491, + 917 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end-effector poses by composing object pose and the grasp pose relative to the object, which turn into the keyframe poses in new demonstrations. The entire augmented trajectory is generated in the same manner as IV-B1.", + "bbox": [ + 501, + 71, + 921, + 131 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3) Camera View", + "text_level": 1, + "bbox": [ + 519, + 137, + 638, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One merit of 3DGS lies in its ability to perform novel view synthesis. Thereby, we are able to choose different camera poses from $\\mathcal{D}_{\\mathrm{expert}}$ and obtain novel-view demonstrations. Although we can render novel-view observations from arbitrary camera pose, we need to ensure that the augmented camera view does not deviate so much from the expert that it loses sight of the manipulation scene. Hence, we first designate a target point $O_{c} = (x_{c},y_{c},z_{c})$ in $\\mathcal{F}_{\\mathrm{real}}$ , towards which the camera should face during the entire episode. We then define a coordinate frame $\\mathcal{F}_c$ , whose origin is $O_{c}$ and orientation is the same as $\\mathcal{F}_{\\mathrm{real}}$ . The position of camera is represented by spherical coordinates $(r,\\theta ,\\varphi)$ in $\\mathcal{F}_c$ . Thus, by limiting the target point within the manipulation scene and randomizing the spherical coordinates, we are able to generate camera poses that produce meaningful observations yet possess diversity. 
The hyperparameters of randomization for the target point and the spherical coordinates are detailed in Appendix B.", + "bbox": [ + 501, + 155, + 921, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4) Embodiment Type", + "text_level": 1, + "bbox": [ + 519, + 419, + 668, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To generalize the expert demonstration to different types of robots, we replace $\\mathcal{G}_{\\mathrm{robot}}^*$ with the 3D Gaussians of another embodiment, dubbed $\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}$ , which is attained from the corresponding URDF file or real-world reconstruction. The keyframe end-effector poses are reused because they are embodiment-agnostic action representations. Hence, through motion planning, we can easily derive the end-effector poses and joint positions of the new embodiment for all time steps in augmented demonstrations. The 3D Gaussians of the new embodiment under novel joint configurations is obtained from $\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}$ as mentioned in Sec. IV-A. The policy trained on these augmented demonstrations is directly deployed on novel embodiments.", + "bbox": [ + 501, + 438, + 921, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5) Scene Appearance", + "text_level": 1, + "bbox": [ + 519, + 640, + 669, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inconsistency between scene appearance accounts for a large visual gap between training and deployment environments. To resolve this issue, we propose to exploit reconstructed diverse 3D scenes and also large-scale image datasets to augment the scene appearance. We adopt COCO [33] as the image dataset, and attach images to the table top and background 3D Gaussian planes that surround the entire manipulation scene. Moreover, we gather datasets for 3D reconstruction [22, 66, 26, 4], and derive corresponding 3D Gaussians by 3DGS training. 
The resulting 3D Gaussian scenes substitute for $\\mathcal{G}_{\\mathrm{bg}}^*$ , forming novel scene appearance for data augmentation. The edge of utilizing reconstructed 3D scenes is their consistent and diverse geometry across multiple camera views, which helps produce more realistic demonstrations. Nevertheless, due to the expense of 3DGS training on large-scale reconstruction datasets, we complement them with 2D images for greater appearance diversity.", + "bbox": [ + 501, + 659, + 921, + 917 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6) Lighting Condition", + "text_level": 1, + "bbox": [ + 89, + 71, + 243, + 85 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Discrepancy in lighting conditions is another barrier to deploying trained policy in unseen scenarios. To compensate for that, we augment the diffuse color of each Gaussian in the reconstructed scene through random scaling, offset, and noise. Concretely, for a Gaussian with original diffuse color $(r,g,b)$ , the augmented diffuse color values can be expressed as $(s_r r + o_r + \\Delta_r, s_g g + o_g + \\Delta_g, s_b b + o_b + \\Delta_b)$ , where $(s_r, s_g, s_b)$ stand for scaling factors, $(o_r, o_g, o_b)$ stand for offsets, and $(\\Delta_r, \\Delta_g, \\Delta_b)$ stand for random Gaussian noise. The scaling factors and offsets simulate changes in color contrast and scene brightness. Thus, they are shared among all the Gaussians in the scene. On the other hand, the random Gaussian noise is sampled independently for each Gaussian to simulate noise in images captured by cameras. The details of scaling factors, offsets, and Gaussian noise are elaborated in Appendix B.", + "bbox": [ + 73, + 85, + 491, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "An illustration of augmented demonstrations with six types of generalizations can be found in Appendix B.", + "bbox": [ + 73, + 327, + 491, + 358 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "C. 
Policy Training", + "text_level": 1, + "bbox": [ + 73, + 364, + 205, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We employ a modern, widely adopted transformer-based architecture [18, 51, 38, 55] to serve as the policy network, which is detailed in Appendix C. We process RGB images with ResNet-18 [21], and encode joint state using a multilayer perceptron (MLP). The latent of images and robot state is fed into a transformer encoder. Finally, an action decoder utilizes an MLP to convert the action latent into the action vector $a_{t}$ . The policy is trained with Behavioural Cloning (BC) in an end-to-end manner, aiming to maximize the likelihood of expert actions in demonstrations. We denote $o_k \\triangleq (I_k, q_k)$ as the observation at the $k$ -th frame of demonstrations $\\mathcal{D}$ , and $\\pi$ as our policy. The loss function can then be expressed as", + "bbox": [ + 73, + 383, + 490, + 565 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} ^ {\\mathrm {B C}} = \\mathbb {E} _ {(o _ {k}, a _ {k}) \\sim \\mathcal {D}} \\| a _ {k} - \\pi (o _ {k}) \\| ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 578, + 398, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Specifically, $I_{k}$ consists of two images from different eye-on-base cameras. We adopt relative end-effector pose as the action representation, which depicts the relative transformation between two consecutive end-effector poses under robot base frame. Further details of the training process can be found in Appendix D.", + "bbox": [ + 73, + 603, + 491, + 694 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "V. EXPERIMENTS", + "text_level": 1, + "bbox": [ + 217, + 702, + 346, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct comprehensive experiments in the real world to verify the effectiveness of our demonstration generation pipeline. 
Specifically, we aim to answer: given a single expert demonstration and multi-view images of the scene,", + "bbox": [ + 73, + 720, + 490, + 780 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) How efficient is data generation compared to manually collecting data?", + "2) How does the policy trained on generated demonstrations perform across various tasks compared to that trained on manually collected data?", + "3) How does the policy perform as the generated data scale up?", + "4) Can generated demonstrations enhance the robustness of the policy when facing various deployment settings, such" + ], + "bbox": [ + 73, + 780, + 491, + 917 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg", + "image_caption": [ + "Fig. 5: Real-world experiment setup. We employ a Franka Research 3 Robot and two eye-on-base RealSense D435i cameras." + ], + "image_footnote": [], + "bbox": [ + 514, + 73, + 908, + 281 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "as changes in object types, camera views, scene appearance, lighting conditions, and embodiment types?", + "bbox": [ + 503, + 349, + 919, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. Experimental Setup", + "text_level": 1, + "bbox": [ + 503, + 395, + 663, + 410 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The real-world experiment setup is presented in Fig. 5. Concretely, we collect the expert demonstration on Franka Research 3 (FR3) Robot. Two Intel Realsense D435i eye-on-base cameras are mounted on the table top, capturing RGB image observations for the policy. We employ a 3D SpaceMouse to collect teleoperated demonstrations at a frequency of $10\\mathrm{Hz}$ . 
Policy inference is carried out on an NVIDIA RTX4090 GPU, with a latency of 0.1s imposed.", + "bbox": [ + 501, + 416, + 919, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In order to manifest the generalization ability of our pipeline to different task settings, we select five tasks for evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place, and Sweep.", + "bbox": [ + 503, + 537, + 919, + 598 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Pick Object task, the policy picks up a target object which is placed at different poses within a $30\\mathrm{cm}\\times 40\\mathrm{cm}$ workspace. In CloseDrawer task, the policy closes a drawer whose position is constrained to a $15\\mathrm{cm}\\times 40\\mathrm{cm}$ workspace, while its rotation about the z-axis is restricted to $\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]$ . In Pick-Place-Close task, the policy is expected to grasp an object, place it in the drawer, and then close the drawer. The drawer is placed in a $5\\mathrm{cm}\\times 5\\mathrm{cm}$ workspace, with a fixed orientation. The target object is located in a $10\\mathrm{cm}\\times 10\\mathrm{cm}$ workspace, whose rotation falls into range $\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]$ . In Dual Pick-Place task, the policy attempts to pick two target objects in a row and place them in a fixed drawer. Both of the objects are located in $10\\mathrm{cm}\\times 10\\mathrm{cm}$ workspaces, with yaw angles between $-\\frac{\\pi}{8}$ and $\\frac{\\pi}{8}$ . In Sweep task, the robot should first pick up a broom and then sweeps the chocolate beans into a dustpan. The broom is randomly placed within a $10\\mathrm{cm}\\times 10\\mathrm{cm}$ area, and the chocolate beans are randomly placed on the chopping board. Task setups are illustrated in Fig. 6. 
These five tasks require proficiency in executing basic pick-and-place actions, manipulating articulated objects, performing long-horizon tasks, and demonstrating skills involving tool use and", + "bbox": [ + 501, + 599, + 921, + 917 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg", + "image_caption": [ + "Fig. 6: Task illustration. We design five manipulation tasks for real-world evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place and Sweep, whose details are elaborated in Sec. V-A." + ], + "image_footnote": [], + "bbox": [ + 83, + 66, + 250, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 250, + 66, + 415, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 66, + 581, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 66, + 748, + 191 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 750, + 66, + 913, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "functional motion. Together, they provide a comprehensive evaluation across various task settings.", + "bbox": [ + 73, + 241, + 491, + 270 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also conduct extensive real-world experiments to prove the effectiveness of our data generation pipeline in terms of different types of generalization. 
Notably, the evaluation of object pose generalization is incorporated into all experiments, including those focused on the other five types of generalization (object types, camera views, embodiment types, lighting conditions, and scene appearance). This is because object pose generalization is a fundamental requirement for task completion ability. For the other five types of generalization, the details are provided in Sec. V-D. Success rate (SR) is chosen as the evaluation metric in all experiments. Each policy is evaluated with 30 trials for a certain evaluation setting.", + "bbox": [ + 73, + 271, + 491, + 452 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "B. Efficiency of Augmenting Demonstrations", + "text_level": 1, + "bbox": [ + 73, + 460, + 377, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To answer Question 1, we need to justify that our pipeline is economical with both labor and time when generating data. The labor-saving property is obvious because demonstrations are generated automatically in our pipeline. We compare the average time consumption of manually collecting a real-world demonstration to that of generating a demonstration through our pipeline. Specifically, we adopt eight processes on an NVIDIA RTX 4090 GPU for paralleled data generation to efficiently utilize computational resources.", + "bbox": [ + 73, + 479, + 490, + 614 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The comparison study is conducted on all five tasks, and the result is shown in Table I. Our data generation pipeline that executed on a single GPU is more than 29 times faster than collecting data in the real world, with an average time consumption of 0.64s across all five tasks. With no human interference, our demonstration generation approach is able to generate visually diverse training data with little time expenditure.", + "bbox": [ + 73, + 616, + 491, + 736 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "C. 
Performance of the Policy Trained on Augmented Data", + "text_level": 1, + "bbox": [ + 73, + 746, + 472, + 761 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To answer Question 2 and 3, we compare the policies trained on generated demonstrations and manually collected demonstrations in terms of their success rates when facing various object poses. Moreover, we explore the performance of policies as generated data gradually scale up.", + "bbox": [ + 73, + 765, + 490, + 840 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The main results of the experiment are illustrated in Fig. 7. While policies trained on real-world demonstrations still have an edge over those trained on the same number of generated ones, our method manifests salient improvement in success rate as the generated demonstrations scale up. Concretely,", + "bbox": [ + 73, + 840, + 491, + 917 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "visuomotor policies trained on 800 generated demonstrations achieve comparable performance to those trained on 200 manually collected demonstrations. Moreover, training with 1800 generated demonstrations raises the success rate to an average of $94.7\\%$ , significantly surpassing the success rate achieved with 200 manually collected demonstrations. It is also worth mentioning that the policy achieves a $96.7\\%$ success rate on Dual Pick-Place task with our generated data, which is nearly $20\\%$ higher than the baseline (manually collected). These findings testify the effectiveness of our method in generating novel object poses for better generalization of visuomotor policies, and indicate promising scaling property as generated data scales up.", + "bbox": [ + 501, + 241, + 921, + 436 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "D. 
Robustness when Facing Various Deployment Settings", + "text_level": 1, + "bbox": [ + 504, + 450, + 893, + 465 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To answer Question 4, we augment the expert demonstration in five different dimensions: lighting conditions, scene appearance, camera views, object types, and embodiment types. We compare policies trained on real-world data, real-world data augmented using 2D augmentation approaches, and data generated via our pipeline. An illustration of the experiments for different generalization types is shown in Fig. 8.", + "bbox": [ + 503, + 470, + 919, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1) Lighting Condition", + "text_level": 1, + "bbox": [ + 521, + 577, + 674, + 590 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To demonstrate the effectiveness of lighting augmentation in our approach, we adopt five different scenarios for policy deployment, which are shown in Appendix E. We compare the performance of four policies that are trained respectively on:", + "bbox": [ + 503, + 592, + 919, + 652 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) 200 real-world demonstrations (Collected);", + "2) 1800 generated demonstrations with only object pose augmentation, which are the same as data used in V-C (Ours Pose-Only);", + "3) real-world demonstrations augmented with color jitter (Color Jitter);", + "4) 3200 demonstrations generated by our pipeline with both lighting condition and object pose augmentation (Ours)." + ], + "bbox": [ + 514, + 656, + 921, + 776 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Fig. 9, policies trained on augmented lighting conditions achieve an average of over $80\\%$ success rate across Pick Object, Close Driver, and Pick-Place-Close tasks, with an overall improvement over those trained on real-world data without augmentation by $70\\%$ . 
Furthermore, our policies show a significant edge over those trained on generated demonstrations with augmented object poses and real-world demonstrations augmented with color jitter, justifying the validity of lighting augmentation in our pipeline.", + "bbox": [ + 503, + 779, + 921, + 917 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b769051aa8353358bbea6381920c97be9cab33e0f3548ab34a755084f2d2e49a.jpg", + "table_caption": [ + "TABLE I: Comparison of demonstration collection time (s). We calculate the average time cost of data collection of a single demonstration over 100 demonstrations. Our method achieves more than 29 times the speed compared to the baseline." + ], + "table_footnote": [], + "table_body": "
Task TypePick ObjectClose PrinterPick-Place-PrintDual Pick-PlaceSweepAverage
Real-world13.210.124.727.020.419.1
Ours0.430.340.861.00.580.64
", + "bbox": [ + 125, + 103, + 859, + 167 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 119, + 181, + 377, + 295 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 181, + 620, + 289 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 181, + 867, + 289 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg", + "image_caption": [ + "Fig. 7: Main results. Top left: We present the average success rate across five tasks. Our method shows promising scalability as the number of demonstration grows. The other five subfigures: For each task, we evaluate the success rate of policies trained from manually collected data and those generated by our method over 30 trials, using different number of demonstrations." 
+ ], + "image_footnote": [], + "bbox": [ + 133, + 296, + 375, + 404 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 296, + 620, + 404 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 296, + 867, + 404 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2) Scene Appearance", + "text_level": 1, + "bbox": [ + 89, + 479, + 241, + 493 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Similar to the experiment on lighting conditions, we select five different scenarios for evaluation on scene appearance augmentation, which is illustrated in Appendix E. The four policies for comparison are trained in a similar manner as described in Sec. V-D1, with the key difference being that we employ image inpainting methods [68, 9, 67, 10] as more robust and suitable 2D augmentation baselines for appearance generalization. The results are shown in Fig. 9. The policy trained on data generated through our pipeline, incorporating both appearance and object pose augmentations, achieves superior performance compared to all baselines. Notably, it demonstrates over a $70\\%$ increase in success rates across all three tasks when compared to policies trained on data without appearance augmentation. 
In particular, our policy achieves $100\\%$ success rate on the Pick Object task, showcasing strong robustness against various background appearance.", + "bbox": [ + 73, + 493, + 491, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3) Camera View", + "text_level": 1, + "bbox": [ + 89, + 734, + 209, + 748 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We employ two different settings for camera view generalization: novel view and moving view. In novel view experiments, we select 30 poses for each camera, which are different from the training perspective. On the other hand, cameras are kept moving in moving view experiments. Similar to Sec. V-D1 and Sec. V-D2, we compare the performance of four policies that are trained respectively on:", + "bbox": [ + 73, + 750, + 490, + 854 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) 200 real-world demonstrations (Collected);", + "2) 1800 generated demonstrations with only object pose augmentation (Ours Pose-Only);", + "3) 3200 demonstrations stemmed from 200 real-world" + ], + "bbox": [ + 84, + 856, + 490, + 915 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "demonstrations, augmented using VISTA [50], which leverages novel view synthesis models to augment data from different views;", + "bbox": [ + 535, + 478, + 921, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4) 3200 generated demonstrations with camera view augmentation (Ours).", + "bbox": [ + 514, + 523, + 919, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present the results in Table II. Our policy is able to perform Pick Object task and Pick-Place-Close task with success rates of over $80\\%$ and $50\\%$ respectively, while the policies trained on data without augmentation can barely accomplish the task. Our approach also outperforms VISTA by a large margin. 
Notably, our policy achieves nearly $100\\%$ success rate on CloseDrawer task, manifesting strong robustness against novel camera views and moving cameras.", + "bbox": [ + 503, + 556, + 921, + 679 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4) Object Type", + "text_level": 1, + "bbox": [ + 519, + 681, + 629, + 695 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In order to demonstrate the effectiveness of our method in augmenting object types, we compare the performance of three different policies that are respectively trained on:", + "bbox": [ + 503, + 696, + 919, + 742 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) 400 real-world demonstrations with 5 real-world objects (Collected);", + "2) 6400 demonstrations stemmed from 200 real-world demonstrations, augmented using ROSIE [67], which utilizes image inpainting models to generate data with unseen objects;", + "3) 6400 demonstrations generated by our pipeline with object type augmentation (Ours)." + ], + "bbox": [ + 514, + 744, + 919, + 867 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "During deployment, we select five real-word objects that are different from all the objects covered in training process. We report the result in Fig. 10. The policy trained on 50 object", + "bbox": [ + 503, + 869, + 921, + 917 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg", + "image_caption": [ + "Fig. 8: Illustration of real-world experiments for different generalization types. The data is collected in the original setting. When deploying the trained policy, we modify object poses, lighting conditions, scene appearance, camera views, object types, and embodiments to evaluate the robustness in different scenarios." 
+ ], + "image_footnote": [], + "bbox": [ + 102, + 68, + 883, + 292 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg", + "image_caption": [ + "Fig. 9: Performance when changing lighting conditions and appearance. We report the success rate of different policies under various lighting conditions and appearance. The policies trained with generated demonstrations with corresponding augmentations manifest remarkable advance compared to baseline policies." + ], + "image_footnote": [], + "bbox": [ + 96, + 364, + 509, + 593 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 362, + 893, + 592 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "types showcases better adaptability to novel object types, improving the success rate of baseline models by over $40\\%$ . This demonstrates the effectiveness of our data generation pipeline in utilizing off-the-shelf 3D Content Generation models to generalize policy to novel objects.", + "bbox": [ + 73, + 662, + 493, + 739 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg", + "image_caption": [ + "Fig. 10: Performance on novel object types. The policy trained on data generated by RoboSplat shows a salient edge over baseline policies." + ], + "image_footnote": [], + "bbox": [ + 89, + 756, + 472, + 875 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5) Embodiment Type", + "text_level": 1, + "bbox": [ + 519, + 662, + 668, + 679 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our method supports generating demonstrations across different embodiment types as mentioned in Sec. IV-B4. 
To prove that, based on one demonstration collected with the Franka Research 3, we generate novel demonstrations for a UR5e robot equipped with a Robotiq 2F-85 gripper and deploy the learned policy directly in the real world. It is worth noting that policies trained on Franka Research 3 robot demonstrations fail to be deployed on UR5e robot due to frequent safety violations. We compare the performance of policies trained on embodiment-augmented demonstrations with those trained on data augmented using RoVi-Aug [8]. RoVi-Aug modifies real-world demonstrations by replacing the appearance of the embodiment through generative models.", + "bbox": [ + 501, + 684, + 921, + 880 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We present the performance of policies in Fig. 11. Policies trained on data generated using our pipeline achieve a success", + "bbox": [ + 503, + 885, + 921, + 917 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/db06a771fc3c7e7fb1af214d8d12133a1d23f93de90106fc8bc75691ee37af30.jpg", + "table_caption": [ + "TABLE II: Performance when changing camera view. We compare the success rate of different policies under two circumstances: novel camera view and moving camera view. The policies trained on demonstrations augmented using our approach showcase significant improvement over baseline policies." + ], + "table_footnote": [], + "table_body": "
Data SourcePick ObjectClose PrinterPick-Place-CloseAverage
Novel ViewMoving ViewNovel ViewMoving ViewNovel ViewMoving View
Collected6.70.016.713.30.00.06.1
Ours Pose-Only0.00.026.730.00.00.09.5
VISTA [50]33.333.356.770.033.316.740.6
Ours90.086.7100.096.753.356.780.6
", + "bbox": [ + 81, + 118, + 923, + 232 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "rate close to $100\\%$ on an embodiment different from the one used for demonstration collection. This result highlights its superior performance compared to the baseline in cross-embodiment transfer.", + "bbox": [ + 73, + 260, + 491, + 321 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg", + "image_caption": [ + "Fig. 11: Performance on cross embodiment experiments. We evaluate the learned policy directly on the UR5e robot and achieve a nearly $100\\%$ success rate that surpasses the 2D augmentation methods." + ], + "image_footnote": [], + "bbox": [ + 78, + 335, + 488, + 458 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "VI. LIMITATIONS", + "text_level": 1, + "bbox": [ + 218, + 558, + 346, + 571 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Due to the limitations of naive 3D Gaussian Splatting, it is incapable of handling deformable objects. Additionally, the pipeline lacks physical constraints, making it unsuitable for contact-rich and dynamic tasks. However, recent advancements in Gaussian Splatting [58, 1, 64, 42] provide promising opportunities to address these challenges. Future work could apply these techniques to generate data for a wider range of tasks.", + "bbox": [ + 73, + 579, + 490, + 685 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "VII. CONCLUSION", + "text_level": 1, + "bbox": [ + 214, + 696, + 352, + 709 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we introduce RoboSplat, a novel demonstration generation approach that requires only a single collected demonstration and generates diverse and high-quality data for policy learning. 
Comprehensive real-world experiments show that our approach significantly enhances the robustness of visuomotor policies when encountering various disturbances.", + "bbox": [ + 73, + 717, + 491, + 808 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 205, + 820, + 361, + 832 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We sincerely thank Yang Tian and Xiao Chen for their fruitful discussions. This work is supported by the National Key R&D Program of China (2022ZD0160201), Shanghai Artificial Intelligence Laboratory, and China Postdoctoral Science Foundation (2023M741848).", + "bbox": [ + 73, + 840, + 491, + 917 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 665, + 261, + 759, + 273 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jad Abou-Chakra, Krishan Rana, Feras Dayoub, and Niko Suenderhauf. Physically embodied gaussian splatt-ting: A visually learnt and physically grounded 3d representation for robotics. In 8th Annual Conference on Robot Learning, 2024.", + "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "[3] Ezra Ameperosa, Jeremy A Collins, Mrinal Jain, and Animesh Garg. Rocoda: Counterfactual data augmentation for data-efficient robot learning from demonstrations. arXiv preprint arXiv:2411.16959, 2024.", + "[4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5470–5479, 2022.", + "[5] Paul J Besl and Neil D McKay. Method for registration of 3-d shapes. 
In Sensor fusion IV: control paradigms and data structures, volume 1611, pages 586-606. Spie, 1992.", + "[6] Ondrej Biza, Skye Thompson, Kishore Reddy Pagidi, Abhinav Kumar, Elise van der Pol, Robin Walters, Thomas Kipf, Jan-Willem van de Meent, Lawson LS Wong, and Robert Platt. One-shot imitation learning via interaction warping. arXiv preprint arXiv:2306.12392, 2023.", + "[7] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023.", + "[8] Lawrence Yunliang Chen, Chenfeng Xu, Karthik Dharmarajan, Muhammad Zubair Irshad, Richard Cheng, Kurt Keutzer, Masayoshi Tomizuka, Quan Vuong, and Ken Goldberg. Rovi-aug: Robot and viewpoint augmentation for cross-embodiment robot learning. arXiv preprint arXiv:2409.03403, 2024.", + "[9] Zoey Chen, Sho Kiami, Abhishek Gupta, and Vikash" + ], + "bbox": [ + 514, + 282, + 921, + 917 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kumar. Genaug: Retargeting behaviors to unseen situations via generative augmentation. arXiv preprint arXiv:2302.06671, 2023.", + "[10] Zoey Chen, Zhao Mandi, Homanga Bharadhwaj, Mohit Sharma, Shuran Song, Abhishek Gupta, and Vikash Kumar. Semantically controllable augmentations for generalizable robot learning. The International Journal of Robotics Research, page 02783649241273686, 2024.", + "[11] Cheng Chi, Zhenjia Xu, Siyuan Feng, Eric Cousineau, Yilun Du, Benjamin Burchfiel, Russ Tedrake, and Shuran Song. Diffusion policy: Visuomotor policy learning via action diffusion. The International Journal of Robotics Research, page 02783649241273668, 2023.", + "[12] Cheng Chi, Zhenjia Xu, Chuer Pan, Eric Cousineau, Benjamin Burchfiel, Siyuan Feng, Russ Tedrake, and Shuran Song. 
Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots. arXiv preprint arXiv:2402.10329, 2024.", + "[13] Ethan Chun, Yilun Du, Anthony Simeonov, Tomas Lozano-Perez, and Leslie Kaelbling. Local neural descriptor fields: Locally conditioned object representations for manipulation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 1830-1836. IEEE, 2023.", + "[14] Murtaza Dalal, Min Liu, Walter Talbott, Chen Chen, Deepak Pathak, Jian Zhang, and Ruslan Salakhutdinov. Local policies enable zero-shot long-horizon manipulation. arXiv preprint arXiv:2410.22332, 2024.", + "[15] Linxi Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, and Anima Anandkumar. Secant: Self-expert cloning for zero-shot generalization of visual policies. arXiv preprint arXiv:2106.09678, 2021.", + "[16] Hao-Shu Fang, Chenxi Wang, Hongjie Fang, Minghao Gou, Jirong Liu, Hengxu Yan, Wenhai Liu, Yichen Xie, and Cewu Lu. Anygrasp: Robust and efficient grasp perception in spatial and temporal domains. IEEE Transactions on Robotics, 2023.", + "[17] Jian Gao, Chun Gu, Youtian Lin, Zhihao Li, Hao Zhu, Xun Cao, Li Zhang, and Yao Yao. Relightable 3d gaussians: Realistic point cloud relighting with brdf decomposition and ray tracing. In European Conference on Computer Vision, pages 73-89. Springer, 2025.", + "[18] Siddhant Haldar, Zhuoran Peng, and Lerrel Pinto. Baku: An efficient transformer for multi-task policy learning. arXiv preprint arXiv:2406.07539, 2024.", + "[19] Nicklas Hansen and Xiaolong Wang. Generalization in reinforcement learning by soft data augmentation. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13611-13617. IEEE, 2021.", + "[20] Nicklas Hansen, Hao Su, and Xiaolong Wang. Stabilizing deep q-learning with convnets and vision transformers under data augmentation. 
Advances in neural information processing systems, 34:3680-3693, 2021.", + "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision" + ], + "bbox": [ + 76, + 71, + 491, + 917 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "and pattern recognition, pages 770-778, 2016.", + "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (ToG), 37(6):1-15, 2018.", + "[23] Alex Irpan, Alexander Herzog, Alexander Toshkov Toshev, Andy Zeng, Anthony Brohan, Brian Andrew Ichter, Byron David, Carolina Parada, Chelsea Finn, Clayton Tan, et al. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, number 2022, 2022.", + "[24] Mazeyu Ji, Ri-Zhao Qiu, Xueyan Zou, and Xiaolong Wang. Graspsplats: Efficient manipulation with 3d feature splatting. arXiv preprint arXiv:2409.02084, 2024.", + "[25] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023.", + "[26] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017.", + "[27] Georgios Kopanas, Thomas Leimkuhler, Gilles Rainer, Clément Jambon, and George Drettakis. Neural point catauastics for novel-view synthesis of reflections. ACM Transactions on Graphics (TOG), 41(6):1-15, 2022.", + "[28] Ilya Kostrikov, Denis Yarats, and Rob Fergus. Image augmentation is all you need: Regularizing deep reinforcement learning from pixels. arXiv preprint arXiv:2004.13649, 2020.", + "[29] Misha Laskin, Kimin Lee, Adam Stooke, Lerrel Pinto, Pieter Abbeel, and Aravind Srinivas. 
Reinforcement learning with augmented data. Advances in neural information processing systems, 33:19884-19895, 2020.", + "[30] Mara Levy, Siddhant Haldar, Lerrel Pinto, and Abhinav Shirivastava. P3-po: Prescriptive point priors for visuospatial generalization of robot policies. arXiv preprint arXiv:2412.06784, 2024.", + "[31] Xinhai Li, Jialin Li, Ziheng Zhang, Rui Zhang, Fan Jia, Tiancai Wang, Haoqiang Fan, Kuo-Kun Tseng, and Ruiping Wang. Robogsim: A real2sim2real robotic gaussian splatting simulator. arXiv preprint arXiv:2411.11839, 2024.", + "[32] Zhihao Liang, Qi Zhang, Ying Feng, Ying Shan, and Kui Jia. Gs-ir: 3d gaussian splatting for inverse rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21644–21653, 2024.", + "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.", + "[34] Guanxing Lu, Shiyi Zhang, Ziwei Wang, Changliu Liu," + ], + "bbox": [ + 508, + 71, + 921, + 917 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jiwen Lu, and Yansong Tang. Manigaussian: Dynamic gaussian splatting for multi-task robotic manipulation. In European Conference on Computer Vision, pages 349-366. Springer, 2025.", + "[35] Zhao Mandi, Homanga Bharadhwaj, Vincent Moens, Shuran Song, Aravind Rajeswaran, and Vikash Kumar. Cacti: A framework for scalable multi-task multi-scene visual imitation learning. arXiv preprint arXiv:2212.05711, 2022.", + "[36] Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Rohun Kulkarni, Li Fei-Fei, Silvio Savarese, Yuke Zhu, and Roberto Martin-Martín. What matters in learning from offline human demonstrations for robot manipulation. 
arXiv preprint arXiv:2108.03298, 2021.", + "[37] Ajay Mandlekar, Soroush Nasiriany, Bowen Wen, Iretiayo Akinola, Yashraj Narang, Linxi Fan, Yuke Zhu, and Dieter Fox. Mimicgen: A data generation system for scalable robot learning using human demonstrations. arXiv preprint arXiv:2310.17596, 2023.", + "[38] Octo Model Team, Dibya Ghosh, Homer Walke, Karl Pertsch, Kevin Black, Oier Mees, Sudeep Dasari, Joel Hejna, Charles Xu, Jianlan Luo, Tobias Kreiman, You Liang Tan, Lawrence Yunliang Chen, Pannag Sanketi, Quan Vuong, Ted Xiao, Dorsa Sadigh, Chelsea Finn, and Sergey Levine. Octo: An open-source generalist robot policy. In Proceedings of Robotics: Science and Systems, Delft, Netherlands, 2024.", + "[39] Abby O'Neill, Abdul Rehman, Abhinav Gupta, Abhiram Maddukuri, Abhishek Gupta, Abhishek Padalkar, Abraham Lee, Acorn Pooley, Agrim Gupta, Ajay Mandlekar, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023.", + "[40] Mohammad Nomaan Qureshi, Sparsh Garg, Francisco Yandun, David Held, George Kantor, and Abhisesh Silwal. Splatsim: Zero-shot sim2real transfer of rgb manipulation policies using gaussian splatting. arXiv preprint arXiv:2409.10161, 2024.", + "[41] Tianhe Ren, Shilong Liu, Ailing Zeng, Jing Lin, Kun-chang Li, He Cao, Jiayu Chen, Xinyu Huang, Yukang Chen, Feng Yan, et al. Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159, 2024.", + "[42] Boxiang Rong, Artur Grigorev, Wenbo Wang, Michael J Black, Bernhard Thomaszewski, Christina Tsalicoglou, and Otmar Hilliges. Gaussian garments: Reconstructing simulation-ready clothing with photorealistic appearance from multi-view video. arXiv preprint arXiv:2409.08189, 2024.", + "[43] Hyunwoo Ryu, Hong-in Lee, Jeong-Hoon Lee, and Jongeun Choi. Equivariant descriptor fields: Se (3)-equivariant energy-based models for end-to-end visual robotic manipulation learning. 
arXiv preprint arXiv:2206.08321, 2022.", + "[44] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on" + ], + "bbox": [ + 76, + 71, + 491, + 916 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Computer Vision and Pattern Recognition (CVPR), 2016.", + "[45] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016.", + "[46] Younggyo Seo, Junsu Kim, Stephen James, Kimin Lee, Jinwoo Shin, and Pieter Abbeel. Multi-view masked world models for visual robotic manipulation. In International Conference on Machine Learning, pages 30613-30632. PMLR, 2023.", + "[47] Ola Shorinwa, Johnathan Tucker, Aliyah Smith, Aiden Swann, Timothy Chen, Roya Firoozi, Monroe Kennedy III, and Mac Schwager. Splat-mover: Multi-stage, open-vocabulary robotic manipulation via editable gaussian splatting. arXiv preprint arXiv:2405.04378, 2024.", + "[48] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. In 2022 International Conference on Robotics and Automation (ICRA), pages 6394-6400. IEEE, 2022.", + "[49] Ritvik Singh, Arthur Allshire, Ankur Handa, Nathan Ratliff, and Karl Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024.", + "[50] Stephen Tian, Blake Wulfe, Kyle Sargent, Katherine Liu, Sergey Zakharov, Vitor Guizilini, and Jiajun Wu. View-invariant policy learning via zero-shot novel view synthesis. arXiv preprint arXiv:2409.03685, 2024.", + "[51] Yang Tian, Sizhe Yang, Jia Zeng, Ping Wang, Dahua Lin, Hao Dong, and Jiangmiao Pang. Predictive inverse dynamics models are scalable learners for robotic manipulation. 
arXiv preprint arXiv:2412.15109, 2024.", + "[52] Marcel Torne, Anthony Simeonov, Zechu Li, April Chan, Tao Chen, Abhishek Gupta, and Pulkit Agrawal. Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. arXiv preprint arXiv:2403.03949, 2024.", + "[53] Pietro Vitiello, Kamil Dreczkowski, and Edward Johns. One-shot imitation learning: A pose estimation perspective. arXiv preprint arXiv:2310.12077, 2023.", + "[54] Vitalis Vosylius and Edward Johns. Instant policy: Incontext imitation learning via graph diffusion. arXiv preprint arXiv:2411.12633, 2024.", + "[55] Hongtao Wu, Ya Jing, Chilam Cheang, Guangzeng Chen, Jiafeng Xu, Xinghang Li, Minghuan Liu, Hang Li, and Tao Kong. Unleashing large-scale video generative pretraining for visual robot manipulation, 2023.", + "[56] Yuxuan Wu, Lei Pan, Wenhua Wu, Guangming Wang, Yanzi Miao, and Hesheng Wang. Rl-gsbridge: 3d gaussian splatting based real2sim2real method for robotic manipulation learning. arXiv preprint arXiv:2409.20291, 2024.", + "[57] Jianfeng Xiang, Zelong Lv, Sicheng Xu, Yu Deng, Ruicheng Wang, Bowen Zhang, Dong Chen, Xin Tong," + ], + "bbox": [ + 506, + 71, + 921, + 916 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "and Jiaolong Yang. Structured 3d latents for scalable and versatile 3d generation. arXiv preprint arXiv:2412.01506, 2024.", + "[58] Tianyi Xie, Zeshun Zong, Yuxing Qiu, Xuan Li, Yutao Feng, Yin Yang, and Chenfanfu Jiang. Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4389-4398, 2024.", + "[59] Zhengrong Xue, Shuying Deng, Zhenyang Chen, Yixuan Wang, Zhecheng Yuan, and Huazhe Xu. Demogen: Synthetic demonstration generation for data-efficient visuomotor policy learning. 
arXiv preprint arXiv:2502.16932, 2025.", + "[60] Jingyun Yang, Zi-ang Cao, Congyue Deng, Rika Antonova, Shuran Song, and Jeannette Bohg. Equibot: Sim (3)-equivariant diffusion policy for generalizable and data efficient learning. arXiv preprint arXiv:2407.01479, 2024.", + "[61] Jingyun Yang, Congyue Deng, Jimmy Wu, Rika Antonova, Leonidas Guibas, and Jeannette Bohg. Equiv-act: Sim (3)-equivariant visuomotor policies beyond rigid object manipulation. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 9249–9255. IEEE, 2024.", + "[62] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024.", + "[63] Sizhe Yang, Yanjie Ze, and Huazhe Xu. Movie: Visual model-based policy adaptation for view generalization. Advances in Neural Information Processing Systems, 36, 2024.", + "[64] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20331-20341, 2024.", + "[65] Mingqiao Ye, Martin Danelljan, Fisher Yu, and Lei Ke. Gaussian grouping: Segment and edit anything in 3d scenes. In European Conference on Computer Vision, pages 162-179. Springer, 2025.", + "[66] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023.", + "[67] Tianhe Yu, Ted Xiao, Austin Stone, Jonathan Tompson, Anthony Brohan, Su Wang, Jaspiar Singh, Clayton Tan, Jodilyn Peralta, Brian Ichter, et al. Scaling robot learning with semantically imagined experience. 
arXiv preprint arXiv:2302.11550, 2023.", + "[68] Chengbo Yuan, Suraj Joshi, Shaoting Zhu, Hang Su," + ], + "bbox": [ + 76, + 70, + 491, + 917 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hang Zhao, and Yang Gao. Roboengine: Plug-and-play robot data augmentation with semantic robot segmentation and background generation. arXiv preprint arXiv:2503.18738, 2025.", + "[69] Zhecheng Yuan, Tianming Wei, Shuiqi Cheng, Gu Zhang, Yuanpei Chen, and Huazhe Xu. Learning to manipulate anywhere: A visual generalizable framework for reinforcement learning. arXiv preprint arXiv:2407.15815, 2024.", + "[70] Xinyu Zhang and Abdeslam Boullarias. One-shot imitation learning with invariance matching for robotic manipulation. arXiv preprint arXiv:2405.13178, 2024.", + "[71] Yuhang Zheng, Xiangyu Chen, Yupeng Zheng, Songen Gu, Runyi Yang, Bu Jin, Pengfei Li, Chengliang Zhong, Zengmao Wang, Lina Liu, et al. Gaussiangrasper: 3d language gaussian splatting for open-vocabulary robotic grasping. arXiv preprint arXiv:2403.09637, 2024." + ], + "bbox": [ + 506, + 70, + 921, + 328 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 245, + 71, + 321, + 84 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A. Applying Transformation and Scaling to 3D Gaussians", + "text_level": 1, + "bbox": [ + 73, + 93, + 470, + 109 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section outlines how to apply transformations (translation, rotation) and scaling to 3D Gaussians.", + "bbox": [ + 73, + 114, + 490, + 143 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The Gaussian primitive typically possesses three core properties: 1) a center position in three-dimensional space; 2) an orientation that specifies the tilt of its principal axes, commonly represented as a quaternion; 3) a scale indicating its width or narrowness. 
Additionally, Gaussian primitives can be enhanced with Spherical Harmonics (SH) to capture complex, direction-dependent color features.", + "bbox": [ + 73, + 145, + 491, + 251 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "When applying a transformation to the Gaussian primitive, the following steps should be taken: 1) update the center position by scaling, rotating, and then adding the translation offset; 2) update the orientation by combining the existing rotation with the new rotation; 3) adjust the scale by multiplying by the scaling factor; 4) rotate the Spherical Harmonics coefficients by using the Wigner D matrices.", + "bbox": [ + 73, + 251, + 491, + 357 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B. Details of Demonstration Augmentation Process", + "text_level": 1, + "bbox": [ + 73, + 369, + 424, + 386 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We expand on the details of the demonstration augmentation process in this section. An illustration of augmented demonstrations is provided in Fig. 12.", + "bbox": [ + 73, + 391, + 490, + 436 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "1) Object pose", + "text_level": 1, + "bbox": [ + 93, + 438, + 197, + 452 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As mentioned in Sec. IV-B1, we transform the end-effector poses at key frames equivariantly according to the transformation that is applied to the target object. However, considering the symmetry of the gripper, we perform post-processing on the transformed end-effector pose.", + "bbox": [ + 73, + 453, + 491, + 527 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Suppose the rotation of the transformed end-effector pose can be expressed as $(r_x, r_y, r_z)$ in the format of XYZ Euler angles. 
We replace $r_z$ with $r_z'$ , which can be calculated as:", + "bbox": [ + 73, + 529, + 491, + 575 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nr _ {z} ^ {\\prime} = \\left\\{ \\begin{array}{l l} r _ {z} & - \\frac {\\pi}{2} \\leqslant r _ {z} \\leqslant \\frac {\\pi}{2} \\\\ r _ {z} + \\pi & r _ {z} < - \\frac {\\pi}{2} \\\\ r _ {z} - \\pi & r _ {z} > \\frac {\\pi}{2}. \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 593, + 388, + 650 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The resulting Euler angles $(r_x, r_y, r_z')$ form the final rotation of the end-effector, which prevents the end-effector from performing redundant rotation along its $z$ -axis.", + "bbox": [ + 73, + 656, + 488, + 702 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "2) Camera view", + "text_level": 1, + "bbox": [ + 89, + 703, + 207, + 715 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As aforementioned in Sec. V-D3, we enumerate the hyperparameters of camera view augmentations and their range of randomization in Table III. Suppose the camera view in the expert demonstration has target point $O_{c}^{\\mathrm{expert}} = (x_{c}^{0},y_{c}^{0},z_{c}^{0})$ and corresponding spherical coordinates $(r^0,\\theta^0,\\varphi^0)$ . Thereby, the target point $O_{c} = (x_{c},y_{c},z_{c})$ and corresponding spherical coordinates $(r,\\theta ,\\varphi)$ are sampled from uniform distributions, ranging between $(x_c^0\\pm \\Delta x_c,y_c^0\\pm \\Delta y_c,z_c^0\\pm \\Delta z_c,r^0\\pm \\Delta r,\\theta^0\\pm \\Delta \\theta ,\\varphi^0\\pm \\Delta \\varphi)$ .", + "bbox": [ + 73, + 718, + 491, + 854 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3) Lighting condition", + "text_level": 1, + "bbox": [ + 89, + 856, + 241, + 869 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present the hyperparameters of lighting condition augmentation in this section. 
First, we normalize the RGB values of each pixel with minimum value 0 and maximum value 1.", + "bbox": [ + 73, + 869, + 491, + 917 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/a41aa0ae08cfe0f40d88521de28cdd9b71e1cb3a141d22dbcaafc61c97e8a93c.jpg", + "table_caption": [ + "TABLE III: Camera view augmentation hyperparameters and their range of randomization." + ], + "table_footnote": [], + "table_body": "
HyperparameterValue
Δxc0.1(m)
Δyc0.1(m)
Δzc0.1(m)
Δr0.2(m)
Δθπ/6
Δφπ/6
", + "bbox": [ + 612, + 103, + 808, + 229 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Then, we stipulate that the hyperparameters are sampled from the following distributions:", + "bbox": [ + 503, + 258, + 919, + 290 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left(\\Delta_ {r}, \\Delta_ {g}, \\Delta_ {b}\\right) \\sim \\mathcal {N} (\\mathbf {0}, 0. 1 ^ {2} \\mathbf {I}), \\\\ s _ {r}, s _ {g}, s _ {b} \\sim \\text {U n i f o r m} (0. 3, 1. 8), \\\\ o _ {r}, o _ {g}, o _ {b} \\sim \\text {U n i f o r m} (- 0. 3, 0. 3). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 311, + 836, + 368 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C. Policy Architecture", + "text_level": 1, + "bbox": [ + 504, + 388, + 660, + 404 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As illustrated in Fig. 13, the policy processes two types of inputs: images and robot states. We use different encoders to tokenize each modality accordingly. For image inputs, the images are first passed through a ResNet-18 vision encoder to generate visual embeddings. We employ a linear layer to extract compact visual features. For the robot state, we encode it into state tokens using a multi-layer perceptron (MLP).", + "bbox": [ + 501, + 410, + 919, + 516 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The multi-modal encoder in our model is based on a GPT-2 style transformer architecture. Before feeding the sequential image and state tokens into the transformer, we append readout tokens [ACT] to the end. These readout tokens attend to embeddings from different modalities, serving as action latents used for action prediction.", + "bbox": [ + 501, + 517, + 919, + 607 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Encoded by the multi-modal encoder, the action latents generated by the [ACT] tokens are fed into the readout decoders to predict actions. 
The action decoder utilizes an MLP to transform the action latent into the action vector. We predict a chunk of 10 future actions. Compared to single-step action prediction, predicting multiple steps provides temporal action consistency and robustness to idle actions [11].", + "bbox": [ + 501, + 608, + 919, + 714 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D. Training Details", + "text_level": 1, + "bbox": [ + 504, + 728, + 643, + 743 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "During training, the input at each timestep consists of two images captured from two eye-on-base cameras, along with the robot state. The robot state includes both the arm state and the gripper state. The gripper state is binary, indicating whether the gripper is open or closed. For the Franka FR3 robot, the arm state is 7-dimensional, while for the UR5e robot, it is 6-dimensional.", + "bbox": [ + 501, + 750, + 919, + 853 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The policy operates with a history length of 1, and the size of the action chunk is set to 10. During inference, we utilize temporal ensemble techniques to compute a weighted average of the multi-step actions.", + "bbox": [ + 501, + 854, + 921, + 917 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg", + "image_caption": [ + "Fig. 12: Illustration of augmented demonstrations. Type of generalization from the top row to the bottom row: object pose, lighting condition, scene appearance, object type, camera view, and embodiment type." + ], + "image_footnote": [], + "bbox": [ + 114, + 63, + 877, + 654 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The policy is trained using a single NVIDIA RTX 4090 GPU, with a batch size of 256 and a learning rate of 1e-4. Depending on the number of demonstrations, the policy is trained for varying numbers of epochs. 
The hyperparameters used during training are detailed in Table IV.", + "bbox": [ + 73, + 710, + 490, + 787 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E. Illustration of Real-World Experiment Settings", + "text_level": 1, + "bbox": [ + 73, + 792, + 413, + 808 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We illustrate the experiment settings on lighting condition generalization in Fig. 14. The flashing light alternates between red and blue light at a frequency of $4\\mathrm{Hz}$ . Every lighting condition takes up 6 trials in a single experiment.", + "bbox": [ + 73, + 811, + 490, + 871 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Besides, we present the real-world settings on appearance generalization in Fig. 15. Each scenario accounts for 5 trials in a single experiment.", + "bbox": [ + 73, + 872, + 491, + 917 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg", + "image_caption": [ + "Fig. 13: Policy architecture." + ], + "image_footnote": [], + "bbox": [ + 527, + 728, + 893, + 875 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/634185e845433b270dbf4c6ed6419b70d4472463ac896032f1924fce80c1f39d.jpg", + "table_caption": [ + "TABLE IV: Policy training hyperparameters." + ], + "table_footnote": [], + "table_body": "
Batch Size256
Learning Rate1e-4
Training Epochs1400 (100 demonstrations)
1000 (200 demonstrations)
800 (400 demonstrations)
700 (800 demonstrations)
500 (1800 demonstrations)
300 (3200 demonstrations)
200 (6400 demonstrations)
Image Size128*128
OptimizerAdamW
History Length1
Action Chunk Length10
", + "bbox": [ + 328, + 109, + 661, + 463 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg", + "image_caption": [ + "(a) Flashing light (Red)" + ], + "image_footnote": [], + "bbox": [ + 114, + 522, + 367, + 669 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg", + "image_caption": [ + "(b) Flashing light (Blue)" + ], + "image_footnote": [], + "bbox": [ + 372, + 522, + 625, + 669 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg", + "image_caption": [ + "(c) Dark light" + ], + "image_footnote": [], + "bbox": [ + 629, + 522, + 883, + 669 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg", + "image_caption": [ + "(d) Bright light" + ], + "image_footnote": [], + "bbox": [ + 114, + 696, + 367, + 845 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg", + "image_caption": [ + "(e) Green light" + ], + "image_footnote": [], + "bbox": [ + 372, + 696, + 625, + 845 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg", + "image_caption": [ + "(f) Yellow light", + "Fig. 14: Illustration of real-world experiment on lighting generalization." 
+ ], + "image_footnote": [], + "bbox": [ + 629, + 696, + 883, + 845 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 112, + 306, + 367, + 455 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 370, + 306, + 625, + 455 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 627, + 306, + 885, + 455 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 112, + 482, + 369, + 632 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 370, + 482, + 625, + 632 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg", + "image_caption": [ + "(f)", + "Fig. 15: Illustration of real-world experiment on appearance generalization." 
+ ], + "image_footnote": [], + "bbox": [ + 627, + 482, + 885, + 632 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_model.json b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2aa9e764d2193880b5ec3ece48c489b9e8dc3a4b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_model.json @@ -0,0 +1,3765 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.13175v1 [cs.RO] 17 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.102, + 0.073, + 0.896, + 0.143 + ], + "angle": 0, + "content": "Novel Demonstration Generation with Gaussian Splitting Enables Robust One-Shot Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.159, + 0.756, + 0.177 + ], + "angle": 0, + "content": "Sizhe Yang\\*,1,2 Wenye \\(\\mathrm{Yu}^{*,1,3}\\) Jia Zeng\\(^{1}\\) Jun Lv\\(^{3}\\) Kerui Ren\\(^{1,3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.323, + 0.178, + 0.668, + 0.194 + ], + "angle": 0, + "content": "Cewu Lu\\(^{3}\\) Dahua Lin\\(^{1,2}\\) Jiangmiao Pang\\(^{1,\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.194, + 0.749, + 0.211 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai AI Laboratory \\(^{2}\\)The Chinese University of Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.211, + 0.614, + 0.228 + ], + "angle": 0, + "content": "\\(^{3}\\)Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.228, + 0.678, + 0.244 + ], + "angle": 0, + "content": "* Equal contributions † Corresponding author" + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.246, + 0.679, + 0.262 + ], + "angle": 0, + "content": "Project page: https://yangsizhe.github.io/robosplat/" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.285, + 
0.484, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.387, + 0.349, + 0.399 + ], + "angle": 0, + "content": "Novel Demonstration Generation" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.406, + 0.207, + 0.483 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.484, + 0.206, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.407, + 0.327, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.483, + 0.327, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.406, + 0.449, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.483, + 0.449, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.489, + 0.286, + 0.596, + 0.298 + ], + "angle": 0, + "content": "Generalization" + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.301, + 0.621, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.38, + 0.62, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.458, + 0.621, + 0.536 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.543, + 0.639, + 0.552 + ], + "angle": 0, + "content": "Training Data Source:" + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.327, + 0.691, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.408, + 0.696, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.699, + 0.301, + 0.834, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.701, + 0.38, + 0.834, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.46, + 0.696, + 0.525 + 
], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.699, + 0.459, + 0.834, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.644, + 0.538, + 0.864, + 0.546 + ], + "angle": 0, + "content": "Manually Collected + Previous 2D Augmentation" + }, + { + "type": "image_caption", + "bbox": [ + 0.644, + 0.547, + 0.84, + 0.556 + ], + "angle": 0, + "content": "Manually Collected Ours (Generated)" + }, + { + "type": "image", + "bbox": [ + 0.84, + 0.327, + 0.91, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.84, + 0.408, + 0.91, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.84, + 0.484, + 0.91, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.568, + 0.922, + 0.628 + ], + "angle": 0, + "content": "Fig. 1: Starting from a single expert demonstration and multi-view images, our method generates diverse and visually realistic data for policy learning, enabling robust performance across six types of generalization in the real world. Compared to previous 2D data augmentation methods, our approach achieves significantly better results across various generalization types. Notably, we achieve this within a unified framework." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.636, + 0.492, + 0.913 + ], + "angle": 0, + "content": "Abstract—Visuomotor policies learned from teleoperated demonstrations face challenges such as lengthy data collection, high costs, and limited data diversity. Existing approaches address these issues by augmenting image observations in RGB space or employing Real-to-Sim-to-Real pipelines based on physical simulators. However, the former is constrained to 2D data augmentation, while the latter suffers from imprecise physical simulation caused by inaccurate geometric reconstruction. 
This paper introduces RoboSplat, a novel method that generates diverse, visually realistic demonstrations by directly manipulating 3D Gaussians. Specifically, we reconstruct the scene through 3D Gaussian Splatting (3DGS), directly edit the reconstructed scene, and augment data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types. Comprehensive real-world experiments demonstrate that RoboSplat significantly enhances the generalization of visuomotor policies under diverse disturbances. Notably, while policies trained on hundreds of real-world demonstrations with additional" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.636, + 0.922, + 0.675 + ], + "angle": 0, + "content": "2D data augmentation achieve an average success rate of \\(57.2\\%\\), RoboSplat attains \\(87.8\\%\\) in one-shot settings across six types of generalization in the real world." + }, + { + "type": "title", + "bbox": [ + 0.647, + 0.686, + 0.78, + 0.699 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.705, + 0.921, + 0.886 + ], + "angle": 0, + "content": "Imitation learning for visuomotor policies has emerged as a promising paradigm in robot manipulation. However, policies learned through imitation often display limited robustness in deployment scenarios that differ substantially from expert demonstrations, primarily due to insufficient coverage of visual domains in the training data. Increasing the volume and diversity of real-world data is an effective strategy for enhancing robustness [12]; however, acquiring human-collected demonstrations is prohibitively time-consuming and labor-intensive. 
Consequently, substantial efforts have been devoted to generating diverse expert data without engaging with real-world environments [68, 69, 49, 8, 10, 67, 9, 35, 50, 59]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.887, + 0.922, + 0.918 + ], + "angle": 0, + "content": "Simulated environments offer a low-cost platform for data synthesis [49, 69]. However, the Sim-to-Real gap presents" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.071, + 0.49, + 0.373 + ], + "angle": 0, + "content": "significant challenges that hinder policy performance in real-world scenarios. Although Real-to-Sim-to-Real pipelines can narrow this gap considerably, replicating real-world manipulation scenes in simulation remains complex and labor-intensive. In particular, inaccuracies in geometric reconstructions often lead to imprecise physical simulations. Moreover, existing Real-to-Sim-to-Real approaches primarily generate data within monotonously reconstructed scenes, resulting in policies that are tailored only to those specific environments. Another line of work sheds light on augmenting image observations for better visual generalization. By editing different semantic parts of the image, these approaches generate novel scene configurations, in terms of background appearances [68, 9, 67, 10], embodiment types [8], object types [67], and camera views [50]. While these image augmentation methods are convenient, their limited consideration of 3D spatial information results in spatially inaccurate data generation. For more effective data augmentation, explicit 3D representations that retain accurate spatial information and are realistically renderable are required." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.373, + 0.49, + 0.524 + ], + "angle": 0, + "content": "Recently, 3D Gaussian Splatting (3DGS) [25] has become a burgeoning approach to superior reconstruction and rendering. 
Thanks to its explicit representation of the scene, 3DGS enables interpretable editing of the reconstructed scene, which paves the way for generating novel manipulation configurations. Furthermore, as a 3D representation of the scene, 3DGS retains spatial information from the real world and allows for consistent rendering from multiple perspectives, which makes it the real-world counterpart of a simulator's graphics engine for generating novel demonstrations." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.524, + 0.49, + 0.811 + ], + "angle": 0, + "content": "Based on that, we propose RoboSplat, a novel and efficacious approach to demonstration generation with Gaussian Splitting. Empowered by 3DGS, we achieve a high-fidelity reconstruction of the manipulation scene. In order to align the reconstructed scene with real-world counterparts, we devise a novel frame alignment pipeline leveraging differentiable rendering of Gaussian Splitting. 3D Gaussians of different scene components are segmented using off-the-shelf segmentation models and the robot United Robotics Description Format (URDF). Remarkably, as illustrated in Fig. 1, a single collected expert trajectory enables us to generate novel demonstrations across a wide range of visual domains. To be specific, RoboSplat augments data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.812, + 0.49, + 0.918 + ], + "angle": 0, + "content": "Compared to previous Real-to-Sim-to-Real and image augmentation approaches, RoboSplat achieves more diverse and spatially accurate data generation. 
Extensive real-world experiments demonstrate that RoboSplat significantly enhances the robustness of visuomotor policies against multiple disturbances across tasks involving pick and place, tool use, functional motion, articulated object manipulation, and long" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.072, + 0.92, + 0.131 + ], + "angle": 0, + "content": "horizon skills. Specifically, compared to policies trained on hundreds of real-world demonstrations that are further enriched with 2D data augmentation, our method increases the average success rate from \\(57.2\\%\\) to \\(87.8\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.646, + 0.148, + 0.784, + 0.161 + ], + "angle": 0, + "content": "II. RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.172, + 0.827, + 0.187 + ], + "angle": 0, + "content": "A. Generalizable Policy in Robot Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.194, + 0.92, + 0.435 + ], + "angle": 0, + "content": "Recent advancements in manipulation have significantly enhanced generalization. Some studies design the policy architecture to endow it with equivariant properties, which is helpful to generalizing to different object poses [60, 61, 43, 13]. One-shot imitation learning approaches like [54, 48, 6, 53, 70] enable the policy to handle various object poses given only one demonstration. Furthermore, some other work focuses on generalizing the policy to different camera views [69, 46, 63], scene appearance [30, 51], and embodiments [12]. Some studies exploit the power of Large Language Models (LLMs) and Vision Language Models (VLMs) to endow robots with generalization abilities [23, 7, 39, 14]. Instead of adopting generalizable policy architecture, auxiliary learning objectives and powerful foundation models, our work is concentrated on generating high-quality, diverse, and realistic data to instill generalization abilities to the learned policy." 
+ }, + { + "type": "title", + "bbox": [ + 0.508, + 0.451, + 0.797, + 0.465 + ], + "angle": 0, + "content": "B. Data Augmentation for Policy Learning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.473, + 0.92, + 0.818 + ], + "angle": 0, + "content": "Given limited training data, data augmentation emerges as a way to improve the robustness of the policy. Previous work adopts image augmentation techniques to improve the resistance of visuomotor policies to observation noises [29, 28, 36, 37, 15, 19, 20]. However, these methods are mainly evaluated in simulated environments. To deploy learned policies in real-world settings, some previous work focuses on augmenting the appearance of the scene by incorporating image-inpainting models [67, 10, 9, 35]. Moreover, Tian et al. [50] generate augmented task demonstrations from different camera views and aim to learn a view-invariant policy. Ameperosa et al. [3] and Chen et al. [8] further devise a cross-embodiment pipeline by inpainting different robots to image observations. Nonetheless, these studies mainly augment task demonstrations on 2D images, which lack spatial information. Hence, only limited augmentation can be achieved, and the augmented demonstrations might be unrealistic compared to those generated directly from 3D representations. Our work reconstructs the scene with 3D Gaussian Splatting and edits the 3D representation for data augmentation, enabling our policy to achieve comprehensive generalization across object poses, object types, camera views, lighting conditions, scene appearance, and various embodiments." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.835, + 0.74, + 0.849 + ], + "angle": 0, + "content": "C. Gaussian Splatting in Robotics" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.857, + 0.92, + 0.918 + ], + "angle": 0, + "content": "3D Gaussian Splatting (3DGS) [25] serves as an explicit radiance field representation for real-time rendering of 3D scenes. 
Previous work leverages 3DGS to select proper grasp poses [24, 71]. Furthermore, Lu et al. [34] exploit 3DGS to" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.069, + 0.699, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.254, + 0.699, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.074, + 0.904, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.411, + 0.924, + 0.472 + ], + "angle": 0, + "content": "Fig. 2: Method overview. We start from a single manually collected demonstration and multi-view images that capture the whole scene. The former provides task-related keyframes, while the latter helps scene reconstruction. After aligning the reconstructed frame with the real-world frame and segmenting different scene components, we carry out autonomous editing of the scene in pursuit of six types of augmentation." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.491, + 0.491, + 0.718 + ], + "angle": 0, + "content": "construct dynamics of the scene for multi-task robot manipulation. In order to predict the consequence of robots' interactions with the environment, Shorinwa et al. [47] leverage 3D semantic masking and infilling to visualize the motions of the objects that result from the interactions. Another line of work adopts the Real-to-Sim-to-Real pipeline, and utilizes 3DGS to reconstruct the real-world scene [31, 40, 56, 52]. However, importing reconstructed real-world objects to simulation is a strenuous process, and physical interactions tend to suffer from large sim-to-real gaps due to the flawed geometric reconstruction and lack of physical information in 3D reconstruction. Some recent work on 3DGS is centered around editing and relighting of the scene [65, 32, 17]. Our method enables autonomous editing of the reconstructed scene to generate diverse demonstrations with various configurations." 
+ }, + { + "type": "title", + "bbox": [ + 0.21, + 0.729, + 0.357, + 0.743 + ], + "angle": 0, + "content": "III. PRELIMINARIES" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.75, + 0.491, + 0.886 + ], + "angle": 0, + "content": "3D Gaussian Splatting (3DGS) [25] utilizes multi-view images for high-fidelity scene reconstruction. The scene is represented by a set of Gaussians \\(\\{g_i\\}_{i=1}^N\\), where each Gaussian \\(g_i\\) consists of a position vector \\(\\mu_i \\in \\mathbb{R}^3\\), a rotation matrix \\(R_i \\in \\mathbb{R}^{3 \\times 3}\\), a scaling matrix \\(S_i = \\text{diag}(s)(s \\in \\mathbb{R}^3)\\), an opacity factor \\(\\alpha_i \\in \\mathbb{R}\\), and spherical harmonic coefficients \\(c_i\\) that encapsulate the view-dependent color appearance of the Gaussian. Given the scaling matrix and rotation matrix, the covariance matrix \\(\\Sigma_i\\) is calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.219, + 0.899, + 0.347, + 0.918 + ], + "angle": 0, + "content": "\\[\n\\Sigma_ {i} = R _ {i} S _ {i} S _ {i} ^ {\\top} R _ {i} ^ {\\top}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.49, + 0.922, + 0.551 + ], + "angle": 0, + "content": "To derive the color \\( C \\) of a particular pixel during rendering procedure, 3DGS exploits a typical neural point-based approach, similar to Kopanas et al. 
[27], where the final color value is calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.57, + 0.807, + 0.611 + ], + "angle": 0, + "content": "\\[\nC = \\sum_ {i = 1} ^ {N} c _ {i} o _ {i} \\prod_ {j = 1} ^ {i - 1} (1 - o _ {j}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.613, + 0.808, + 0.642 + ], + "angle": 0, + "content": "\\[\no _ {i} = \\alpha_ {i} \\cdot \\exp \\left(- \\frac {1}{2} \\delta_ {i} ^ {\\top} \\Sigma_ {i, 2 D} ^ {- 1} \\delta_ {i}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.646, + 0.922, + 0.737 + ], + "angle": 0, + "content": "where \\(N\\) is the number of Gaussians that overlap with the pixel. Besides, \\(\\alpha_{i}\\) denotes the opacity of the \\(i\\)-th Gaussian. \\(\\delta_{i} \\in \\mathbb{R}^{2}\\) denotes the offset between the current pixel and the center of the \\(i\\)-th Gaussian projected to 2D image. \\(\\Sigma_{i,2D} \\in \\mathbb{R}^{2 \\times 2}\\) stands for the covariance matrix of the \\(i\\)-th Gaussian projected to 2D image." + }, + { + "type": "title", + "bbox": [ + 0.639, + 0.746, + 0.786, + 0.76 + ], + "angle": 0, + "content": "IV. METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.766, + 0.922, + 0.918 + ], + "angle": 0, + "content": "To generate high-fidelity and diverse data from a single expert trajectory, we present RoboSplat, a novel demonstration generation approach based on 3DGS. An overview of our method is shown in Fig. 2. In this section, we describe RoboSplat in detail. We begin with the process of reconstruction and preprocessing in Sec. IV-A, which includes object and scene reconstruction, frame alignment with differentiable rendering, and novel pose generation for the robot and objects. 
With all the Gaussian models ready, we generate novel demonstrations and perform data augmentation in terms of object" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.074, + 0.072, + 0.493, + 0.148 + ], + "angle": 0, + "content": "poses, object types, camera views, scene appearance, lighting conditions, and embodiments, as described in Sec. IV-B. Finally, a visuomotor policy is trained on the augmented demonstrations and directly deployed on real robots, as detailed in Sec. IV-C." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.159, + 0.332, + 0.174 + ], + "angle": 0, + "content": "A. Reconstruction and Preprocessing" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.178, + 0.492, + 0.375 + ], + "angle": 0, + "content": "In pursuit of a high-fidelity reconstruction of the scene, we first capture a set of RGB images whose corresponding viewpoints should be as various as possible. During this process, the scene remains static and the robot is fixed at its default joint configuration, which we refer to as \\( q_{\\mathrm{default}} \\). With the images ready, we utilize COLMAP [45, 44] to obtain a sparse scene reconstruction and an estimation of the camera pose corresponding to each image. To further enhance the reconstruction precision, we gain a depth estimation for each image with Depth Anything [62]. The images, camera poses, and depth prior serve as inputs to 3DGS [25], which returns 3D Gaussians representing the entire scene \\( \\mathcal{G}_{\\mathrm{scene}} \\), which contains 3D Gaussians corresponding to the robot, dubbed \\( \\mathcal{G}_{\\mathrm{robot}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.375, + 0.492, + 0.435 + ], + "angle": 0, + "content": "However, the reconstructed 3D Gaussians of the robot are represented in an arbitrary frame \\(\\mathcal{F}_{\\mathrm{scene}}\\), and hence we need to align it with the real-world coordinate frame \\(\\mathcal{F}_{\\mathrm{real}}\\) to facilitate automated editing." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.436, + 0.493, + 0.661 + ], + "angle": 0, + "content": "The robot URDF gives us access to the robot base frame \\(\\mathcal{F}_{\\mathrm{URDF}}\\). The real-world robot frame \\(\\mathcal{F}_{\\mathrm{robot}}\\), \\(\\mathcal{F}_{\\mathrm{URDF}}\\), and \\(\\mathcal{F}_{\\mathrm{real}}\\) are all aligned with each other. Hence, the actual problem turns into the frame alignment from \\(\\mathcal{F}_{\\mathrm{scene}}\\) to \\(\\mathcal{F}_{\\mathrm{URDF}}\\). We denote the transformation matrix as \\(\\mathcal{T}_{\\mathrm{URDF, scene}}\\). While point cloud registration approaches, such as Iterative Closest Point (ICP) [5], serve as a common solution to it, we find that there is still major misalignment between the two frames aligned with point cloud registration, as illustrated in Fig. 3. The reason lies in the fact that point cloud registration is based on point coordinates, whereas 3D Gaussians have a scale attribute, which causes a mismatch between point coordinates and the appearance. Therefore, we exploit the differentiable rendering of 3DGS to do further fine-grained alignment, as depicted in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.662, + 0.493, + 0.846 + ], + "angle": 0, + "content": "Suppose \\(\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}\\) is the initial transformation matrix obtained through ICP. We first apply \\(\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}\\) to \\(\\mathcal{G}_{\\mathrm{robot}}\\) leading to a partially aligned robot Gaussian \\(\\hat{\\mathcal{G}}_{\\mathrm{robot}}\\). The aim of further alignment is to derive another transformation matrix \\(\\hat{\\mathcal{T}}_{\\mathrm{rel}}\\), such that applying \\(\\hat{\\mathcal{T}}_{\\mathrm{rel}}\\) to \\(\\hat{\\mathcal{G}}_{\\mathrm{robot}}\\) gives a better alignment to the pose of the robot defined in URDF. 
For this sake, we select \\(N\\) canonical camera views to capture the segmentation masks \\(\\{\\mathcal{I}_i^{\\mathrm{URDF}}\\}_{i = 1}^N\\) and \\(\\{\\mathcal{I}_i^{\\mathrm{Gaussian}}\\}_{i = 1}^N\\) (the pixel value is 1 if it belongs to the robot; otherwise, it is 0) with the robot URDF and \\(\\hat{\\mathcal{G}}_{\\mathrm{robot}}\\) respectively. The pixel-wise differences between the images from the same canonical views are averaged to form the objective function of alignment:" + }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.856, + 0.407, + 0.897 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {align}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\mathcal {I} _ {i} ^ {\\text {URDF}} - \\mathcal {I} _ {i} ^ {\\text {Gaussian}}\\right) ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.902, + 0.492, + 0.918 + ], + "angle": 0, + "content": "Due to the differentiability of Gaussian Splatting, we can" + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.065, + 0.872, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.198, + 0.925, + 0.306 + ], + "angle": 0, + "content": "Fig. 3: Comparison of frame alignment results between ICP and fine-grained optimization with differentiable rendering. The semi-transparent orange overlay represents the ground truth rendered with URDF from the same camera view. The left shows the results of ICP, which have larger errors, while the right shows the results after further fine-grained optimization using differentiable rendering." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.32, + 0.92, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.527, + 0.922, + 0.618 + ], + "angle": 0, + "content": "Fig. 4: Illustration of frame alignment with differentiable rendering. The loss is calculated between the mask rendered using Gaussian Splatting and the mask rendered with URDF. 
Subsequently, backpropagation and gradient descent are used to optimize the translation, rotation, and scale, which are then applied to the 3D Gaussians." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.627, + 0.921, + 0.704 + ], + "angle": 0, + "content": "rewrite the objective function as \\(\\mathcal{L}_{\\mathrm{align}}(\\hat{T}_{\\mathrm{rel}})\\) and optimize \\(\\hat{T}_{\\mathrm{rel}}\\) through gradient descent. The optimized \\(\\hat{T}_{\\mathrm{rel}}\\) is composed with \\(\\hat{T}_{\\mathrm{URDF, scene}}^{0}\\), the result of which is applied to \\(\\mathcal{G}_{\\mathrm{scene}}\\) to form the scene reconstruction in \\(\\mathcal{F}_{\\mathrm{real}}\\). We refer to the aligned 3D Gaussians as \\(\\mathcal{G}_{\\mathrm{scene}}^{*}\\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.705, + 0.922, + 0.92 + ], + "angle": 0, + "content": "In order to decompose the scene into different parts, we first leverage Grounded-SAM [41] to perform task-related object segmentation. Then, the masked images are used to reconstruct 3D Gaussians for the objects. The 3D Gaussians corresponding to each link of the robot are segmented using the point cloud of each link in \\(\\mathcal{F}_{\\mathrm{URDF}}\\), which can be obtained with the robot's URDF and the renderer. Specifically, if the position of a 3D Gaussian is within a threshold distance from the point cloud of a link, the 3D Gaussian is assigned to that link. If a 3D Gaussian does not belong to any object or any link of the robot, it is classified as background. We suppose that the robot has \\(l\\) links and there are totally \\(k\\) objects in the scene. 
The reconstructed robot links, objects, and background are denoted as \\(\\mathcal{G}_{\\mathrm{robot}}^* = \\{\\mathcal{G}_{\\mathrm{robot},i}^*\\}_{i=1}^l\\), \\(\\mathcal{G}_{\\mathrm{obj}}^* = \\{\\mathcal{G}_{\\mathrm{obj},j}^*\\}_{j=1}^k\\), and \\(\\mathcal{G}_{\\mathrm{bg}}^*\\)" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.075, + 0.072, + 0.162, + 0.086 + ], + "angle": 0, + "content": "respectively." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.087, + 0.492, + 0.162 + ], + "angle": 0, + "content": "Similar to our frame alignment strategy, we utilize differentiable rendering to estimate the deployed camera poses in order to narrow the gap between the generated data and the deployment environment. The camera extrinsics are optimized through gradient descent, with the optimization objective:" + }, + { + "type": "equation", + "bbox": [ + 0.162, + 0.177, + 0.407, + 0.195 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {c a m e r a}} = S S I M \\left(\\mathcal {I} _ {\\text {E x p e r t}}, \\mathcal {I} _ {\\text {G a u s s i a n}}\\right) ^ {2},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.202, + 0.491, + 0.277 + ], + "angle": 0, + "content": "where \\(\\mathcal{I}_{\\mathrm{Expert}}\\) denotes the image obtained from the collected expert demonstration, \\(\\mathcal{I}_{\\mathrm{Gaussian}}\\) represents the rendered image with reconstructed 3D Gaussians, and SSIM refers to Structural Similarity, which measures the perceptual similarity between two images." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.278, + 0.492, + 0.475 + ], + "angle": 0, + "content": "Nonetheless, before moving on to novel demonstration generation, we need to figure out how to generate 3D Gaussians for the robot under novel joint configurations. To achieve that, we leverage the link-wise Gaussians \\(\\{\\mathcal{G}_{\\mathrm{robot},i}^{*}\\}_{i = 1}^{l}\\) and the default joint configuration \\(q_{\\mathrm{default}}\\). 
For each link \\(1 \\leqslant i \\leqslant l\\), we access its relative pose to robot base frame under arbitrary joint configuration \\(q\\) through forward kinematics, denoted as \\(\\mathcal{T}_{\\mathrm{fk}}^i(q)\\). Hence, by transforming each link \\(i\\) with \\(\\mathcal{T}_{\\mathrm{fk}}^i(q)\\mathcal{T}_{\\mathrm{fk}}^i(q_{\\mathrm{default}})^{-1}\\), we derive the corresponding 3D Gaussians under configuration \\(q\\). The entire 3D Gaussians are thereby derived by composing Gaussians of all \\(l\\) links. As for the manipulated objects, we apply transformations in a similar manner. The way 3D Gaussians are transformed is detailed in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.487, + 0.325, + 0.501 + ], + "angle": 0, + "content": "B. Novel Demonstration Generation" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.507, + 0.491, + 0.582 + ], + "angle": 0, + "content": "Utilizing 3D Gaussians in \\(\\mathcal{F}_{\\mathrm{real}}\\), we implement our demonstration augmentation process, which systematically enhances the expert demonstration \\(\\mathcal{D}_{\\mathrm{expert}}\\) across six aspects: object poses, object types, camera views, embodiment types, scene appearance, and lighting conditions." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.584, + 0.2, + 0.598 + ], + "angle": 0, + "content": "1) Object Pose" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.599, + 0.491, + 0.78 + ], + "angle": 0, + "content": "To perform object pose augmentation, we first extract keyframes from the expert demonstration using a heuristic approach. Whenever the gripper action toggles or joint velocities approach zero, we consider the current time step as a keyframe and record the end-effector pose with respect to robot base frame. After that, we apply rigid transformations to the target objects that are involved in the expert demonstration. The end-effector poses at keyframes are transformed equivariantly according to the target object. 
Eventually, we generate trajectories between consecutive keyframe poses with motion planning, the combination of which makes a complete augmented demonstration with novel object poses." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.781, + 0.2, + 0.796 + ], + "angle": 0, + "content": "2) Object Type" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.796, + 0.492, + 0.919 + ], + "angle": 0, + "content": "The object types can be augmented with 3D Content Generation. We first prompt GPT-4 [2] to generate approximately 50 names of objects that can be grasped. Then, we use these object names as prompts to generate corresponding 3D Gaussians with a 3D content generation model [57]. We utilize an off-the-shelf grasping algorithm [16] to generate grasp poses with respect to the object frame. As we generate different object poses for augmentation, we obtain the corresponding" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.072, + 0.923, + 0.132 + ], + "angle": 0, + "content": "end-effector poses by composing object pose and the grasp pose relative to the object, which turn into the keyframe poses in new demonstrations. The entire augmented trajectory is generated in the same manner as IV-B1." + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.138, + 0.64, + 0.152 + ], + "angle": 0, + "content": "3) Camera View" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.156, + 0.923, + 0.415 + ], + "angle": 0, + "content": "One merit of 3DGS lies in its ability to perform novel view synthesis. Thereby, we are able to choose different camera poses from \\( \\mathcal{D}_{\\mathrm{expert}} \\) and obtain novel-view demonstrations. Although we can render novel-view observations from arbitrary camera pose, we need to ensure that the augmented camera view does not deviate so much from the expert that it loses sight of the manipulation scene. 
Hence, we first designate a target point \\( O_{c} = (x_{c},y_{c},z_{c}) \\) in \\( \\mathcal{F}_{\\mathrm{real}} \\), towards which the camera should face during the entire episode. We then define a coordinate frame \\( \\mathcal{F}_c \\), whose origin is \\( O_{c} \\) and orientation is the same as \\( \\mathcal{F}_{\\mathrm{real}} \\). The position of camera is represented by spherical coordinates \\( (r,\\theta ,\\varphi) \\) in \\( \\mathcal{F}_c \\). Thus, by limiting the target point within the manipulation scene and randomizing the spherical coordinates, we are able to generate camera poses that produce meaningful observations yet possess diversity. The hyperparameters of randomization for the target point and the spherical coordinates are detailed in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.42, + 0.669, + 0.435 + ], + "angle": 0, + "content": "4) Embodiment Type" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.439, + 0.923, + 0.635 + ], + "angle": 0, + "content": "To generalize the expert demonstration to different types of robots, we replace \\(\\mathcal{G}_{\\mathrm{robot}}^*\\) with the 3D Gaussians of another embodiment, dubbed \\(\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}\\), which is attained from the corresponding URDF file or real-world reconstruction. The keyframe end-effector poses are reused because they are embodiment-agnostic action representations. Hence, through motion planning, we can easily derive the end-effector poses and joint positions of the new embodiment for all time steps in augmented demonstrations. The 3D Gaussians of the new embodiment under novel joint configurations is obtained from \\(\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}\\) as mentioned in Sec. IV-A. The policy trained on these augmented demonstrations is directly deployed on novel embodiments." 
+ }, + { + "type": "title", + "bbox": [ + 0.521, + 0.641, + 0.671, + 0.656 + ], + "angle": 0, + "content": "5) Scene Appearance" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.66, + 0.923, + 0.919 + ], + "angle": 0, + "content": "Inconsistency between scene appearance accounts for a large visual gap between training and deployment environments. To resolve this issue, we propose to exploit reconstructed diverse 3D scenes and also large-scale image datasets to augment the scene appearance. We adopt COCO [33] as the image dataset, and attach images to the table top and background 3D Gaussian planes that surround the entire manipulation scene. Moreover, we gather datasets for 3D reconstruction [22, 66, 26, 4], and derive corresponding 3D Gaussians by 3DGS training. The resulting 3D Gaussian scenes substitute for \\(\\mathcal{G}_{\\mathrm{bg}}^*\\), forming novel scene appearance for data augmentation. The edge of utilizing reconstructed 3D scenes is their consistent and diverse geometry across multiple camera views, which helps produce more realistic demonstrations. Nevertheless, due to the expense of 3DGS training on large-scale reconstruction datasets, we complement them with 2D images for greater appearance diversity." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.072, + 0.245, + 0.086 + ], + "angle": 0, + "content": "6) Lighting Condition" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.087, + 0.493, + 0.327 + ], + "angle": 0, + "content": "Discrepancy in lighting conditions is another barrier to deploying trained policy in unseen scenarios. To compensate for that, we augment the diffuse color of each Gaussian in the reconstructed scene through random scaling, offset, and noise. 
Concretely, for a Gaussian with original diffuse color \\((r,g,b)\\), the augmented diffuse color values can be expressed as \\((s_r r + o_r + \\Delta_r, s_g g + o_g + \\Delta_g, s_b b + o_b + \\Delta_b)\\), where \\((s_r, s_g, s_b)\\) stand for scaling factors, \\((o_r, o_g, o_b)\\) stand for offsets, and \\((\\Delta_r, \\Delta_g, \\Delta_b)\\) stand for random Gaussian noise. The scaling factors and offsets simulate changes in color contrast and scene brightness. Thus, they are shared among all the Gaussians in the scene. On the other hand, the random Gaussian noise is sampled independently for each Gaussian to simulate noise in images captured by cameras. The details of scaling factors, offsets, and Gaussian noise are elaborated in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.328, + 0.492, + 0.359 + ], + "angle": 0, + "content": "An illustration of augmented demonstrations with six types of generalizations can be found in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.365, + 0.207, + 0.381 + ], + "angle": 0, + "content": "C. Policy Training" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.385, + 0.491, + 0.566 + ], + "angle": 0, + "content": "We employ a modern, widely adopted transformer-based architecture [18, 51, 38, 55] to serve as the policy network, which is detailed in Appendix C. We process RGB images with ResNet-18 [21], and encode joint state using a multilayer perceptron (MLP). The latent of images and robot state is fed into a transformer encoder. Finally, an action decoder utilizes an MLP to convert the action latent into the action vector \\(a_{t}\\). The policy is trained with Behavioural Cloning (BC) in an end-to-end manner, aiming to maximize the likelihood of expert actions in demonstrations. We denote \\(o_k \\triangleq (I_k, q_k)\\) as the observation at the \\(k\\)-th frame of demonstrations \\(\\mathcal{D}\\), and \\(\\pi\\) as our policy. 
The loss function can then be expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.579, + 0.399, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} ^ {\\mathrm {B C}} = \\mathbb {E} _ {(o _ {k}, a _ {k}) \\sim \\mathcal {D}} \\| a _ {k} - \\pi (o _ {k}) \\| ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.604, + 0.492, + 0.695 + ], + "angle": 0, + "content": "Specifically, \\( I_{k} \\) consists of two images from different eye-on-base cameras. We adopt relative end-effector pose as the action representation, which depicts the relative transformation between two consecutive end-effector poses under robot base frame. Further details of the training process can be found in Appendix D." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.703, + 0.348, + 0.716 + ], + "angle": 0, + "content": "V. EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.721, + 0.491, + 0.781 + ], + "angle": 0, + "content": "We conduct comprehensive experiments in the real world to verify the effectiveness of our demonstration generation pipeline. Specifically, we aim to answer: given a single expert demonstration and multi-view images of the scene," + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.781, + 0.49, + 0.811 + ], + "angle": 0, + "content": "1) How efficient is data generation compared to manually collecting data?" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.811, + 0.49, + 0.856 + ], + "angle": 0, + "content": "2) How does the policy trained on generated demonstrations perform across various tasks compared to that trained on manually collected data?" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.856, + 0.49, + 0.886 + ], + "angle": 0, + "content": "3) How does the policy perform as the generated data scale up?" 
+ }, + { + "type": "text", + "bbox": [ + 0.075, + 0.886, + 0.492, + 0.918 + ], + "angle": 0, + "content": "4) Can generated demonstrations enhance the robustness of the policy when facing various deployment settings, such" + }, + { + "type": "list", + "bbox": [ + 0.075, + 0.781, + 0.492, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.074, + 0.909, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.287, + 0.922, + 0.332 + ], + "angle": 0, + "content": "Fig. 5: Real-world experiment setup. We employ a Franka Research 3 Robot and two eye-on-base RealSense D435i cameras." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.35, + 0.921, + 0.381 + ], + "angle": 0, + "content": "as changes in object types, camera views, scene appearance, lighting conditions, and embodiment types?" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.396, + 0.664, + 0.411 + ], + "angle": 0, + "content": "A. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.417, + 0.921, + 0.538 + ], + "angle": 0, + "content": "The real-world experiment setup is presented in Fig. 5. Concretely, we collect the expert demonstration on Franka Research 3 (FR3) Robot. Two Intel Realsense D435i eye-on-base cameras are mounted on the table top, capturing RGB image observations for the policy. We employ a 3D SpaceMouse to collect teleoperated demonstrations at a frequency of \\(10\\mathrm{Hz}\\). Policy inference is carried out on an NVIDIA RTX4090 GPU, with a latency of 0.1s imposed." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.539, + 0.92, + 0.599 + ], + "angle": 0, + "content": "In order to manifest the generalization ability of our pipeline to different task settings, we select five tasks for evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place, and Sweep." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.6, + 0.922, + 0.918 + ], + "angle": 0, + "content": "In Pick Object task, the policy picks up a target object which is placed at different poses within a \\(30\\mathrm{cm}\\times 40\\mathrm{cm}\\) workspace. In CloseDrawer task, the policy closes a drawer whose position is constrained to a \\(15\\mathrm{cm}\\times 40\\mathrm{cm}\\) workspace, while its rotation about the z-axis is restricted to \\(\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]\\). In Pick-Place-Close task, the policy is expected to grasp an object, place it in the drawer, and then close the drawer. The drawer is placed in a \\(5\\mathrm{cm}\\times 5\\mathrm{cm}\\) workspace, with a fixed orientation. The target object is located in a \\(10\\mathrm{cm}\\times 10\\mathrm{cm}\\) workspace, whose rotation falls into range \\(\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]\\). In Dual Pick-Place task, the policy attempts to pick two target objects in a row and place them in a fixed drawer. Both of the objects are located in \\(10\\mathrm{cm}\\times 10\\mathrm{cm}\\) workspaces, with yaw angles between \\(-\\frac{\\pi}{8}\\) and \\(\\frac{\\pi}{8}\\). In Sweep task, the robot should first pick up a broom and then sweeps the chocolate beans into a dustpan. The broom is randomly placed within a \\(10\\mathrm{cm}\\times 10\\mathrm{cm}\\) area, and the chocolate beans are randomly placed on the chopping board. Task setups are illustrated in Fig. 6. 
These five tasks require proficiency in executing basic pick-and-place actions, manipulating articulated objects, performing long-horizon tasks, and demonstrating skills involving tool use and" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.068, + 0.25, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.251, + 0.068, + 0.416, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.418, + 0.068, + 0.582, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.068, + 0.749, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.75, + 0.068, + 0.915, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.199, + 0.921, + 0.228 + ], + "angle": 0, + "content": "Fig. 6: Task illustration. We design five manipulation tasks for real-world evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place and Sweep, whose details are elaborated in Sec. V-A." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.242, + 0.492, + 0.271 + ], + "angle": 0, + "content": "functional motion. Together, they provide a comprehensive evaluation across various task settings." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.272, + 0.493, + 0.453 + ], + "angle": 0, + "content": "We also conduct extensive real-world experiments to prove the effectiveness of our data generation pipeline in terms of different types of generalization. Notably, the evaluation of object pose generalization is incorporated into all experiments, including those focused on the other five types of generalization (object types, camera views, embodiment types, lighting conditions, and scene appearance). This is because object pose generalization is a fundamental requirement for task completion ability. For the other five types of generalization, the details are provided in Sec. V-D. 
Success rate (SR) is chosen as the evaluation metric in all experiments. Each policy is evaluated with 30 trials for a certain evaluation setting." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.462, + 0.379, + 0.476 + ], + "angle": 0, + "content": "B. Efficiency of Augmenting Demonstrations" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.481, + 0.491, + 0.616 + ], + "angle": 0, + "content": "To answer Question 1, we need to justify that our pipeline is economical with both labor and time when generating data. The labor-saving property is obvious because demonstrations are generated automatically in our pipeline. We compare the average time consumption of manually collecting a real-world demonstration to that of generating a demonstration through our pipeline. Specifically, we adopt eight processes on an NVIDIA RTX 4090 GPU for paralleled data generation to efficiently utilize computational resources." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.617, + 0.492, + 0.737 + ], + "angle": 0, + "content": "The comparison study is conducted on all five tasks, and the result is shown in Table I. Our data generation pipeline that executed on a single GPU is more than 29 times faster than collecting data in the real world, with an average time consumption of 0.64s across all five tasks. With no human interference, our demonstration generation approach is able to generate visually diverse training data with little time expenditure." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.747, + 0.473, + 0.762 + ], + "angle": 0, + "content": "C. Performance of the Policy Trained on Augmented Data" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.766, + 0.491, + 0.841 + ], + "angle": 0, + "content": "To answer Question 2 and 3, we compare the policies trained on generated demonstrations and manually collected demonstrations in terms of their success rates when facing various object poses. 
Moreover, we explore the performance of policies as generated data gradually scale up." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.842, + 0.492, + 0.918 + ], + "angle": 0, + "content": "The main results of the experiment are illustrated in Fig. 7. While policies trained on real-world demonstrations still have an edge over those trained on the same number of generated ones, our method manifests salient improvement in success rate as the generated demonstrations scale up. Concretely," + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.242, + 0.923, + 0.438 + ], + "angle": 0, + "content": "visuomotor policies trained on 800 generated demonstrations achieve comparable performance to those trained on 200 manually collected demonstrations. Moreover, training with 1800 generated demonstrations raises the success rate to an average of \\(94.7\\%\\), significantly surpassing the success rate achieved with 200 manually collected demonstrations. It is also worth mentioning that the policy achieves a \\(96.7\\%\\) success rate on Dual Pick-Place task with our generated data, which is nearly \\(20\\%\\) higher than the baseline (manually collected). These findings testify the effectiveness of our method in generating novel object poses for better generalization of visuomotor policies, and indicate promising scaling property as generated data scales up." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.451, + 0.895, + 0.466 + ], + "angle": 0, + "content": "D. Robustness when Facing Various Deployment Settings" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.471, + 0.921, + 0.577 + ], + "angle": 0, + "content": "To answer Question 4, we augment the expert demonstration in five different dimensions: lighting conditions, scene appearance, camera views, object types, and embodiment types. We compare policies trained on real-world data, real-world data augmented using 2D augmentation approaches, and data generated via our pipeline. 
An illustration of the experiments for different generalization types is shown in Fig. 8." + }, + { + "type": "title", + "bbox": [ + 0.522, + 0.578, + 0.676, + 0.592 + ], + "angle": 0, + "content": "1) Lighting Condition" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.593, + 0.921, + 0.654 + ], + "angle": 0, + "content": "To demonstrate the effectiveness of lighting augmentation in our approach, we adopt five different scenarios for policy deployment, which are shown in Appendix E. We compare the performance of four policies that are trained respectively on:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.657, + 0.833, + 0.671 + ], + "angle": 0, + "content": "1) 200 real-world demonstrations (Collected);" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.672, + 0.922, + 0.717 + ], + "angle": 0, + "content": "2) 1800 generated demonstrations with only object pose augmentation, which are the same as data used in V-C (Ours Pose-Only);" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.717, + 0.921, + 0.747 + ], + "angle": 0, + "content": "3) real-world demonstrations augmented with color jitter (Color Jitter);" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.747, + 0.921, + 0.777 + ], + "angle": 0, + "content": "4) 3200 demonstrations generated by our pipeline with both lighting condition and object pose augmentation (Ours)." + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.657, + 0.922, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.78, + 0.922, + 0.918 + ], + "angle": 0, + "content": "As shown in Fig. 9, policies trained on augmented lighting conditions achieve an average of over \\(80\\%\\) success rate across Pick Object, Close Driver, and Pick-Place-Close tasks, with an overall improvement over those trained on real-world data without augmentation by \\(70\\%\\). 
Furthermore, our policies show a significant edge over those trained on generated demonstrations with augmented object poses and real-world demonstrations augmented with color jitter, justifying the validity of lighting augmentation in our pipeline." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.066, + 0.922, + 0.098 + ], + "angle": 0, + "content": "TABLE I: Comparison of demonstration collection time (s). We calculate the average time cost of data collection of a single demonstration over 100 demonstrations. Our method achieves more than 29 times the speed compared to the baseline." + }, + { + "type": "table", + "bbox": [ + 0.127, + 0.104, + 0.86, + 0.168 + ], + "angle": 0, + "content": "
Task TypePick ObjectClose PrinterPick-Place-PrintDual Pick-PlaceSweepAverage
Real-world13.210.124.727.020.419.1
Ours0.430.340.861.00.580.64
" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.182, + 0.379, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.182, + 0.621, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.182, + 0.868, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.134, + 0.297, + 0.376, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.297, + 0.622, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.297, + 0.868, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.414, + 0.924, + 0.462 + ], + "angle": 0, + "content": "Fig. 7: Main results. Top left: We present the average success rate across five tasks. Our method shows promising scalability as the number of demonstration grows. The other five subfigures: For each task, we evaluate the success rate of policies trained from manually collected data and those generated by our method over 30 trials, using different number of demonstrations." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.48, + 0.242, + 0.494 + ], + "angle": 0, + "content": "2) Scene Appearance" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.494, + 0.492, + 0.734 + ], + "angle": 0, + "content": "Similar to the experiment on lighting conditions, we select five different scenarios for evaluation on scene appearance augmentation, which is illustrated in Appendix E. The four policies for comparison are trained in a similar manner as described in Sec. V-D1, with the key difference being that we employ image inpainting methods [68, 9, 67, 10] as more robust and suitable 2D augmentation baselines for appearance generalization. The results are shown in Fig. 9. 
The policy trained on data generated through our pipeline, incorporating both appearance and object pose augmentations, achieves superior performance compared to all baselines. Notably, it demonstrates over a \\(70\\%\\) increase in success rates across all three tasks when compared to policies trained on data without appearance augmentation. In particular, our policy achieves \\(100\\%\\) success rate on the Pick Object task, showcasing strong robustness against various background appearance." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.735, + 0.21, + 0.749 + ], + "angle": 0, + "content": "3) Camera View" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.75, + 0.491, + 0.856 + ], + "angle": 0, + "content": "We employ two different settings for camera view generalization: novel view and moving view. In novel view experiments, we select 30 poses for each camera, which are different from the training perspective. On the other hand, cameras are kept moving in moving view experiments. Similar to Sec. V-D1 and Sec. 
V-D2, we compare the performance of four policies that are trained respectively on:" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.857, + 0.404, + 0.871 + ], + "angle": 0, + "content": "1) 200 real-world demonstrations (Collected);" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.871, + 0.49, + 0.901 + ], + "angle": 0, + "content": "2) 1800 generated demonstrations with only object pose augmentation (Ours Pose-Only);" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.902, + 0.491, + 0.916 + ], + "angle": 0, + "content": "3) 3200 demonstrations stemmed from 200 real-world" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.857, + 0.491, + 0.916 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.479, + 0.922, + 0.524 + ], + "angle": 0, + "content": "demonstrations, augmented using VISTA [50], which leverages novel view synthesis models to augment data from different views;" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.525, + 0.921, + 0.555 + ], + "angle": 0, + "content": "4) 3200 generated demonstrations with camera view augmentation (Ours)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.558, + 0.922, + 0.68 + ], + "angle": 0, + "content": "We present the results in Table II. Our policy is able to perform Pick Object task and Pick-Place-Close task with success rates of over \\(80\\%\\) and \\(50\\%\\) respectively, while the policies trained on data without augmentation can barely accomplish the task. Our approach also outperforms VISTA by a large margin. Notably, our policy achieves nearly \\(100\\%\\) success rate on CloseDrawer task, manifesting strong robustness against novel camera views and moving cameras." 
+ }, + { + "type": "title", + "bbox": [ + 0.521, + 0.682, + 0.63, + 0.696 + ], + "angle": 0, + "content": "4) Object Type" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.697, + 0.921, + 0.743 + ], + "angle": 0, + "content": "In order to demonstrate the effectiveness of our method in augmenting object types, we compare the performance of three different policies that are respectively trained on:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.746, + 0.92, + 0.775 + ], + "angle": 0, + "content": "1) 400 real-world demonstrations with 5 real-world objects (Collected);" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.777, + 0.921, + 0.836 + ], + "angle": 0, + "content": "2) 6400 demonstrations stemmed from 200 real-world demonstrations, augmented using ROSIE [67], which utilizes image inpainting models to generate data with unseen objects;" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.837, + 0.92, + 0.868 + ], + "angle": 0, + "content": "3) 6400 demonstrations generated by our pipeline with object type augmentation (Ours)." + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.746, + 0.921, + 0.868 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.871, + 0.922, + 0.918 + ], + "angle": 0, + "content": "During deployment, we select five real-word objects that are different from all the objects covered in training process. We report the result in Fig. 10. The policy trained on 50 object" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.069, + 0.885, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.303, + 0.924, + 0.351 + ], + "angle": 0, + "content": "Fig. 8: Illustration of real-world experiments for different generalization types. The data is collected in the original setting. 
When deploying the trained policy, we modify object poses, lighting conditions, scene appearance, camera views, object types, and embodiments to evaluate the robustness in different scenarios." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.365, + 0.51, + 0.594 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.363, + 0.895, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.602, + 0.924, + 0.651 + ], + "angle": 0, + "content": "Fig. 9: Performance when changing lighting conditions and appearance. We report the success rate of different policies under various lighting conditions and appearance. The policies trained with generated demonstrations with corresponding augmentations manifest remarkable advance compared to baseline policies." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.664, + 0.494, + 0.741 + ], + "angle": 0, + "content": "types showcases better adaptability to novel object types, improving the success rate of baseline models by over \\(40\\%\\). This demonstrates the effectiveness of our data generation pipeline in utilizing off-the-shelf 3D Content Generation models to generalize policy to novel objects." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.757, + 0.473, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.875, + 0.493, + 0.92 + ], + "angle": 0, + "content": "Fig. 10: Performance on novel object types. The policy trained on data generated by RoboSplat shows a salient edge over baseline policies." + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.664, + 0.669, + 0.68 + ], + "angle": 0, + "content": "5) Embodiment Type" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.685, + 0.923, + 0.881 + ], + "angle": 0, + "content": "Our method supports generating demonstrations across different embodiment types as mentioned in Sec. IV-B4. 
To prove that, based on one demonstration collected with the Franka Research 3, we generate novel demonstrations for a UR5e robot equipped with a Robotiq 2F-85 gripper and deploy the learned policy directly in the real world. It is worth noting that policies trained on Franka Research 3 robot demonstrations fail to be deployed on UR5e robot due to frequent safety violations. We compare the performance of policies trained on embodiment-augmented demonstrations with those trained on data augmented using RoVi-Aug [8]. RoVi-Aug modifies real-world demonstrations by replacing the appearance of the embodiment through generative models." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.886, + 0.923, + 0.918 + ], + "angle": 0, + "content": "We present the performance of policies in Fig. 11. Policies trained on data generated using our pipeline achieve a success" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.074, + 0.067, + 0.924, + 0.113 + ], + "angle": 0, + "content": "TABLE II: Performance when changing camera view. We compare the success rate of different policies under two circumstances: novel camera view and moving camera view. The policies trained on demonstrations augmented using our approach showcase significant improvement over baseline policies." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.119, + 0.924, + 0.233 + ], + "angle": 0, + "content": "
Data SourcePick ObjectClose PrinterPick-Place-CloseAverage
Novel ViewMoving ViewNovel ViewMoving ViewNovel ViewMoving View
Collected6.70.016.713.30.00.06.1
Ours Pose-Only0.00.026.730.00.00.09.5
VISTA [50]33.333.356.770.033.316.740.6
Ours90.086.7100.096.753.356.780.6
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.261, + 0.492, + 0.322 + ], + "angle": 0, + "content": "rate close to \\(100\\%\\) on an embodiment different from the one used for demonstration collection. This result highlights its superior performance compared to the baseline in cross-embodiment transfer." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.336, + 0.489, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.467, + 0.493, + 0.529 + ], + "angle": 0, + "content": "Fig. 11: Performance on cross embodiment experiments. We evaluate the learned policy directly on the UR5e robot and achieve a nearly \\(100\\%\\) success rate that surpasses the 2D augmentation methods." + }, + { + "type": "title", + "bbox": [ + 0.219, + 0.559, + 0.348, + 0.572 + ], + "angle": 0, + "content": "VI. LIMITATIONS" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.58, + 0.491, + 0.686 + ], + "angle": 0, + "content": "Due to the limitations of naive 3D Gaussian Splatting, it is incapable of handling deformable objects. Additionally, the pipeline lacks physical constraints, making it unsuitable for contact-rich and dynamic tasks. However, recent advancements in Gaussian Splatting [58, 1, 64, 42] provide promising opportunities to address these challenges. Future work could apply these techniques to generate data for a wider range of tasks." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.697, + 0.353, + 0.71 + ], + "angle": 0, + "content": "VII. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.718, + 0.492, + 0.809 + ], + "angle": 0, + "content": "In this work, we introduce RoboSplat, a novel demonstration generation approach that requires only a single collected demonstration and generates diverse and high-quality data for policy learning. Comprehensive real-world experiments show that our approach significantly enhances the robustness of visuomotor policies when encountering various disturbances." 
+ }, + { + "type": "title", + "bbox": [ + 0.206, + 0.821, + 0.362, + 0.833 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.841, + 0.493, + 0.918 + ], + "angle": 0, + "content": "We sincerely thank Yang Tian and Xiao Chen for their fruitful discussions. This work is supported by the National Key R&D Program of China (2022ZD0160201), Shanghai Artificial Intelligence Laboratory, and China Postdoctoral Science Foundation (2023M741848)." + }, + { + "type": "title", + "bbox": [ + 0.666, + 0.262, + 0.761, + 0.274 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.283, + 0.922, + 0.358 + ], + "angle": 0, + "content": "[1] Jad Abou-Chakra, Krishan Rana, Feras Dayoub, and Niko Suenderhauf. Physically embodied gaussian splatting: A visually learnt and physically grounded 3d representation for robotics. In 8th Annual Conference on Robot Learning, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.359, + 0.922, + 0.433 + ], + "angle": 0, + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.434, + 0.921, + 0.493 + ], + "angle": 0, + "content": "[3] Ezra Ameperosa, Jeremy A Collins, Mrinal Jain, and Animesh Garg. Rocoda: Counterfactual data augmentation for data-efficient robot learning from demonstrations. arXiv preprint arXiv:2411.16959, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.494, + 0.922, + 0.569 + ], + "angle": 0, + "content": "[4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5470–5479, 2022."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.57, + 0.921, + 0.628 + ], + "angle": 0, + "content": "[5] Paul J Besl and Neil D McKay. Method for registration of 3-d shapes. In Sensor fusion IV: control paradigms and data structures, volume 1611, pages 586-606. Spie, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.63, + 0.921, + 0.719 + ], + "angle": 0, + "content": "[6] Ondrej Biza, Skye Thompson, Kishore Reddy Pagidi, Abhinav Kumar, Elise van der Pol, Robin Walters, Thomas Kipf, Jan-Willem van de Meent, Lawson LS Wong, and Robert Platt. One-shot imitation learning via interaction warping. arXiv preprint arXiv:2306.12392, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.721, + 0.921, + 0.809 + ], + "angle": 0, + "content": "[7] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.811, + 0.921, + 0.901 + ], + "angle": 0, + "content": "[8] Lawrence Yunliang Chen, Chenfeng Xu, Karthik Dharmarajan, Muhammad Zubair Irshad, Richard Cheng, Kurt Keutzer, Masayoshi Tomizuka, Quan Vuong, and Ken Goldberg. Rovi-aug: Robot and viewpoint augmentation for cross-embodiment robot learning. arXiv preprint arXiv:2409.03403, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.902, + 0.921, + 0.918 + ], + "angle": 0, + "content": "[9] Zoey Chen, Sho Kiami, Abhishek Gupta, and Vikash" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.283, + 0.922, + 0.918 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.072, + 0.491, + 0.116 + ], + "angle": 0, + "content": "Kumar. Genaug: Retargeting behaviors to unseen situations via generative augmentation. arXiv preprint arXiv:2302.06671, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.117, + 0.492, + 0.193 + ], + "angle": 0, + "content": "[10] Zoey Chen, Zhao Mandi, Homanga Bharadhwaj, Mohit Sharma, Shuran Song, Abhishek Gupta, and Vikash Kumar. Semantically controllable augmentations for generalizable robot learning. The International Journal of Robotics Research, page 02783649241273686, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.193, + 0.492, + 0.268 + ], + "angle": 0, + "content": "[11] Cheng Chi, Zhenjia Xu, Siyuan Feng, Eric Cousineau, Yilun Du, Benjamin Burchfiel, Russ Tedrake, and Shuran Song. Diffusion policy: Visuomotor policy learning via action diffusion. The International Journal of Robotics Research, page 02783649241273668, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.268, + 0.492, + 0.344 + ], + "angle": 0, + "content": "[12] Cheng Chi, Zhenjia Xu, Chuer Pan, Eric Cousineau, Benjamin Burchfiel, Siyuan Feng, Russ Tedrake, and Shuran Song. Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots. arXiv preprint arXiv:2402.10329, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.344, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[13] Ethan Chun, Yilun Du, Anthony Simeonov, Tomas Lozano-Perez, and Leslie Kaelbling. Local neural descriptor fields: Locally conditioned object representations for manipulation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 1830-1836. IEEE, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.434, + 0.492, + 0.495 + ], + "angle": 0, + "content": "[14] Murtaza Dalal, Min Liu, Walter Talbott, Chen Chen, Deepak Pathak, Jian Zhang, and Ruslan Salakhutdinov. Local policies enable zero-shot long-horizon manipulation. arXiv preprint arXiv:2410.22332, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.495, + 0.492, + 0.555 + ], + "angle": 0, + "content": "[15] Linxi Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, and Anima Anandkumar. Secant: Self-expert cloning for zero-shot generalization of visual policies. arXiv preprint arXiv:2106.09678, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.555, + 0.492, + 0.63 + ], + "angle": 0, + "content": "[16] Hao-Shu Fang, Chenxi Wang, Hongjie Fang, Minghao Gou, Jirong Liu, Hengxu Yan, Wenhai Liu, Yichen Xie, and Cewu Lu. Anygrasp: Robust and efficient grasp perception in spatial and temporal domains. IEEE Transactions on Robotics, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.63, + 0.492, + 0.706 + ], + "angle": 0, + "content": "[17] Jian Gao, Chun Gu, Youtian Lin, Zhihao Li, Hao Zhu, Xun Cao, Li Zhang, and Yao Yao. Relightable 3d gaussians: Realistic point cloud relighting with brdf decomposition and ray tracing. In European Conference on Computer Vision, pages 73-89. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.706, + 0.492, + 0.751 + ], + "angle": 0, + "content": "[18] Siddhant Haldar, Zhuoran Peng, and Lerrel Pinto. Baku: An efficient transformer for multi-task policy learning. arXiv preprint arXiv:2406.07539, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.751, + 0.492, + 0.811 + ], + "angle": 0, + "content": "[19] Nicklas Hansen and Xiaolong Wang. Generalization in reinforcement learning by soft data augmentation. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13611-13617. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.811, + 0.492, + 0.872 + ], + "angle": 0, + "content": "[20] Nicklas Hansen, Hao Su, and Xiaolong Wang. Stabilizing deep q-learning with convnets and vision transformers under data augmentation. Advances in neural information processing systems, 34:3680-3693, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.872, + 0.492, + 0.919 + ], + "angle": 0, + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.072, + 0.492, + 0.919 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.072, + 0.856, + 0.087 + ], + "angle": 0, + "content": "and pattern recognition, pages 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.087, + 0.922, + 0.147 + ], + "angle": 0, + "content": "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (ToG), 37(6):1-15, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.147, + 0.922, + 0.236 + ], + "angle": 0, + "content": "[23] Alex Irpan, Alexander Herzog, Alexander Toshkov Toshev, Andy Zeng, Anthony Brohan, Brian Andrew Ichter, Byron David, Carolina Parada, Chelsea Finn, Clayton Tan, et al. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, number 2022, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.238, + 0.922, + 0.283 + ], + "angle": 0, + "content": "[24] Mazeyu Ji, Ri-Zhao Qiu, Xueyan Zou, and Xiaolong Wang. Graspsplats: Efficient manipulation with 3d feature splatting. arXiv preprint arXiv:2409.02084, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.283, + 0.922, + 0.343 + ], + "angle": 0, + "content": "[25] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.343, + 0.922, + 0.403 + ], + "angle": 0, + "content": "[26] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.403, + 0.922, + 0.465 + ], + "angle": 0, + "content": "[27] Georgios Kopanas, Thomas Leimkuhler, Gilles Rainer, Clément Jambon, and George Drettakis. Neural point catacaustics for novel-view synthesis of reflections. ACM Transactions on Graphics (TOG), 41(6):1-15, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.465, + 0.922, + 0.524 + ], + "angle": 0, + "content": "[28] Ilya Kostrikov, Denis Yarats, and Rob Fergus. Image augmentation is all you need: Regularizing deep reinforcement learning from pixels. arXiv preprint arXiv:2004.13649, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.524, + 0.922, + 0.585 + ], + "angle": 0, + "content": "[29] Misha Laskin, Kimin Lee, Adam Stooke, Lerrel Pinto, Pieter Abbeel, and Aravind Srinivas. Reinforcement learning with augmented data. Advances in neural information processing systems, 33:19884-19895, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.584, + 0.922, + 0.645 + ], + "angle": 0, + "content": "[30] Mara Levy, Siddhant Haldar, Lerrel Pinto, and Abhinav Shrivastava. P3-po: Prescriptive point priors for visuospatial generalization of robot policies. arXiv preprint arXiv:2412.06784, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.645, + 0.922, + 0.719 + ], + "angle": 0, + "content": "[31] Xinhai Li, Jialin Li, Ziheng Zhang, Rui Zhang, Fan Jia, Tiancai Wang, Haoqiang Fan, Kuo-Kun Tseng, and Ruiping Wang. Robogsim: A real2sim2real robotic gaussian splatting simulator. arXiv preprint arXiv:2411.11839, 2024."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.72, + 0.922, + 0.795 + ], + "angle": 0, + "content": "[32] Zhihao Liang, Qi Zhang, Ying Feng, Ying Shan, and Kui Jia. Gs-ir: 3d gaussian splatting for inverse rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21644–21653, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.796, + 0.922, + 0.902 + ], + "angle": 0, + "content": "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.902, + 0.922, + 0.919 + ], + "angle": 0, + "content": "[34] Guanxing Lu, Shiyi Zhang, Ziwei Wang, Changliu Liu," + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.072, + 0.922, + 0.919 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.072, + 0.492, + 0.131 + ], + "angle": 0, + "content": "Jiwen Lu, and Yansong Tang. Manigaussian: Dynamic gaussian splatting for multi-task robotic manipulation. In European Conference on Computer Vision, pages 349-366. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.132, + 0.492, + 0.207 + ], + "angle": 0, + "content": "[35] Zhao Mandi, Homanga Bharadhwaj, Vincent Moens, Shuran Song, Aravind Rajeswaran, and Vikash Kumar. Cacti: A framework for scalable multi-task multi-scene visual imitation learning. arXiv preprint arXiv:2212.05711, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.208, + 0.492, + 0.297 + ], + "angle": 0, + "content": "[36] Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Rohun Kulkarni, Li Fei-Fei, Silvio Savarese, Yuke Zhu, and Roberto Martin-Martín. 
What matters in learning from offline human demonstrations for robot manipulation. arXiv preprint arXiv:2108.03298, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.298, + 0.492, + 0.373 + ], + "angle": 0, + "content": "[37] Ajay Mandlekar, Soroush Nasiriany, Bowen Wen, Iretiayo Akinola, Yashraj Narang, Linxi Fan, Yuke Zhu, and Dieter Fox. Mimicgen: A data generation system for scalable robot learning using human demonstrations. arXiv preprint arXiv:2310.17596, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.374, + 0.492, + 0.493 + ], + "angle": 0, + "content": "[38] Octo Model Team, Dibya Ghosh, Homer Walke, Karl Pertsch, Kevin Black, Oier Mees, Sudeep Dasari, Joel Hejna, Charles Xu, Jianlan Luo, Tobias Kreiman, You Liang Tan, Lawrence Yunliang Chen, Pannag Sanketi, Quan Vuong, Ted Xiao, Dorsa Sadigh, Chelsea Finn, and Sergey Levine. Octo: An open-source generalist robot policy. In Proceedings of Robotics: Science and Systems, Delft, Netherlands, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.494, + 0.492, + 0.569 + ], + "angle": 0, + "content": "[39] Abby O'Neill, Abdul Rehman, Abhinav Gupta, Abhiram Maddukuri, Abhishek Gupta, Abhishek Padalkar, Abraham Lee, Acorn Pooley, Agrim Gupta, Ajay Mandlekar, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.57, + 0.492, + 0.645 + ], + "angle": 0, + "content": "[40] Mohammad Nomaan Qureshi, Sparsh Garg, Francisco Yandun, David Held, George Kantor, and Abhisesh Silwal. Splatsim: Zero-shot sim2real transfer of rgb manipulation policies using gaussian splatting. arXiv preprint arXiv:2409.10161, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.645, + 0.492, + 0.72 + ], + "angle": 0, + "content": "[41] Tianhe Ren, Shilong Liu, Ailing Zeng, Jing Lin, Kun-chang Li, He Cao, Jiayu Chen, Xinyu Huang, Yukang Chen, Feng Yan, et al. 
Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.721, + 0.492, + 0.809 + ], + "angle": 0, + "content": "[42] Boxiang Rong, Artur Grigorev, Wenbo Wang, Michael J Black, Bernhard Thomaszewski, Christina Tsalicoglou, and Otmar Hilliges. Gaussian garments: Reconstructing simulation-ready clothing with photorealistic appearance from multi-view video. arXiv preprint arXiv:2409.08189, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.811, + 0.492, + 0.886 + ], + "angle": 0, + "content": "[43] Hyunwoo Ryu, Hong-in Lee, Jeong-Hoon Lee, and Jongeun Choi. Equivariant descriptor fields: Se (3)-equivariant energy-based models for end-to-end visual robotic manipulation learning. arXiv preprint arXiv:2206.08321, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.887, + 0.492, + 0.917 + ], + "angle": 0, + "content": "[44] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.072, + 0.492, + 0.917 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.072, + 0.92, + 0.087 + ], + "angle": 0, + "content": "Computer Vision and Pattern Recognition (CVPR), 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.088, + 0.922, + 0.147 + ], + "angle": 0, + "content": "[45] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.148, + 0.922, + 0.221 + ], + "angle": 0, + "content": "[46] Younggyo Seo, Junsu Kim, Stephen James, Kimin Lee, Jinwoo Shin, and Pieter Abbeel. Multi-view masked world models for visual robotic manipulation. In International Conference on Machine Learning, pages 30613-30632. PMLR, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.223, + 0.922, + 0.311 + ], + "angle": 0, + "content": "[47] Ola Shorinwa, Johnathan Tucker, Aliyah Smith, Aiden Swann, Timothy Chen, Roya Firoozi, Monroe Kennedy III, and Mac Schwager. Splat-mover: Multi-stage, open-vocabulary robotic manipulation via editable gaussian splatting. arXiv preprint arXiv:2405.04378, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.313, + 0.922, + 0.403 + ], + "angle": 0, + "content": "[48] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. In 2022 International Conference on Robotics and Automation (ICRA), pages 6394-6400. IEEE, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.922, + 0.464 + ], + "angle": 0, + "content": "[49] Ritvik Singh, Arthur Allshire, Ankur Handa, Nathan Ratliff, and Karl Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.465, + 0.922, + 0.524 + ], + "angle": 0, + "content": "[50] Stephen Tian, Blake Wulfe, Kyle Sargent, Katherine Liu, Sergey Zakharov, Vitor Guizilini, and Jiajun Wu. View-invariant policy learning via zero-shot novel view synthesis. arXiv preprint arXiv:2409.03685, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.525, + 0.922, + 0.584 + ], + "angle": 0, + "content": "[51] Yang Tian, Sizhe Yang, Jia Zeng, Ping Wang, Dahua Lin, Hao Dong, and Jiangmiao Pang. Predictive inverse dynamics models are scalable learners for robotic manipulation. arXiv preprint arXiv:2412.15109, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.585, + 0.922, + 0.659 + ], + "angle": 0, + "content": "[52] Marcel Torne, Anthony Simeonov, Zechu Li, April Chan, Tao Chen, Abhishek Gupta, and Pulkit Agrawal. 
Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. arXiv preprint arXiv:2403.03949, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.66, + 0.922, + 0.705 + ], + "angle": 0, + "content": "[53] Pietro Vitiello, Kamil Dreczkowski, and Edward Johns. One-shot imitation learning: A pose estimation perspective. arXiv preprint arXiv:2310.12077, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.706, + 0.922, + 0.751 + ], + "angle": 0, + "content": "[54] Vitalis Vosylius and Edward Johns. Instant policy: Incontext imitation learning via graph diffusion. arXiv preprint arXiv:2411.12633, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.752, + 0.922, + 0.811 + ], + "angle": 0, + "content": "[55] Hongtao Wu, Ya Jing, Chilam Cheang, Guangzeng Chen, Jiafeng Xu, Xinghang Li, Minghuan Liu, Hang Li, and Tao Kong. Unleashing large-scale video generative pretraining for visual robot manipulation, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.812, + 0.922, + 0.885 + ], + "angle": 0, + "content": "[56] Yuxuan Wu, Lei Pan, Wenhua Wu, Guangming Wang, Yanzi Miao, and Hesheng Wang. Rl-gsbridge: 3d gaussian splatting based real2sim2real method for robotic manipulation learning. arXiv preprint arXiv:2409.20291, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.887, + 0.922, + 0.917 + ], + "angle": 0, + "content": "[57] Jianfeng Xiang, Zelong Lv, Sicheng Xu, Yu Deng, Ruicheng Wang, Bowen Zhang, Dong Chen, Xin Tong," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.072, + 0.922, + 0.917 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.071, + 0.492, + 0.116 + ], + "angle": 0, + "content": "and Jiaolong Yang. Structured 3d latents for scalable and versatile 3d generation. arXiv preprint arXiv:2412.01506, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.117, + 0.492, + 0.207 + ], + "angle": 0, + "content": "[58] Tianyi Xie, Zeshun Zong, Yuxing Qiu, Xuan Li, Yutao Feng, Yin Yang, and Chenfanfu Jiang. Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4389-4398, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.208, + 0.492, + 0.282 + ], + "angle": 0, + "content": "[59] Zhengrong Xue, Shuying Deng, Zhenyang Chen, Yixuan Wang, Zhecheng Yuan, and Huazhe Xu. Demogen: Synthetic demonstration generation for data-efficient visuomotor policy learning. arXiv preprint arXiv:2502.16932, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.283, + 0.492, + 0.357 + ], + "angle": 0, + "content": "[60] Jingyun Yang, Zi-ang Cao, Congyue Deng, Rika Antonova, Shuran Song, and Jeannette Bohg. Equibot: Sim (3)-equivariant diffusion policy for generalizable and data efficient learning. arXiv preprint arXiv:2407.01479, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.359, + 0.492, + 0.448 + ], + "angle": 0, + "content": "[61] Jingyun Yang, Congyue Deng, Jimmy Wu, Rika Antonova, Leonidas Guibas, and Jeannette Bohg. Equiv-act: Sim (3)-equivariant visuomotor policies beyond rigid object manipulation. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 9249–9255. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.449, + 0.492, + 0.538 + ], + "angle": 0, + "content": "[62] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.539, + 0.492, + 0.599 + ], + "angle": 0, + "content": "[63] Sizhe Yang, Yanjie Ze, and Huazhe Xu. 
Movie: Visual model-based policy adaptation for view generalization. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.601, + 0.492, + 0.689 + ], + "angle": 0, + "content": "[64] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20331-20341, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.691, + 0.492, + 0.751 + ], + "angle": 0, + "content": "[65] Mingqiao Ye, Martin Danelljan, Fisher Yu, and Lei Ke. Gaussian grouping: Segment and edit anything in 3d scenes. In European Conference on Computer Vision, pages 162-179. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.752, + 0.492, + 0.825 + ], + "angle": 0, + "content": "[66] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.827, + 0.492, + 0.901 + ], + "angle": 0, + "content": "[67] Tianhe Yu, Ted Xiao, Austin Stone, Jonathan Tompson, Anthony Brohan, Su Wang, Jaspiar Singh, Clayton Tan, Jodilyn Peralta, Brian Ichter, et al. Scaling robot learning with semantically imagined experience. arXiv preprint arXiv:2302.11550, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.902, + 0.492, + 0.918 + ], + "angle": 0, + "content": "[68] Chengbo Yuan, Suraj Joshi, Shaoting Zhu, Hang Su," + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.071, + 0.492, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.071, + 0.922, + 0.131 + ], + "angle": 0, + "content": "Hang Zhao, and Yang Gao. 
Roboengine: Plug-and-play robot data augmentation with semantic robot segmentation and background generation. arXiv preprint arXiv:2503.18738, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.132, + 0.922, + 0.207 + ], + "angle": 0, + "content": "[69] Zhecheng Yuan, Tianming Wei, Shuiqi Cheng, Gu Zhang, Yuanpei Chen, and Huazhe Xu. Learning to manipulate anywhere: A visual generalizable framework for reinforcement learning. arXiv preprint arXiv:2407.15815, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.208, + 0.922, + 0.253 + ], + "angle": 0, + "content": "[70] Xinyu Zhang and Abdeslam Boullarias. One-shot imitation learning with invariance matching for robotic manipulation. arXiv preprint arXiv:2405.13178, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.253, + 0.922, + 0.329 + ], + "angle": 0, + "content": "[71] Yuhang Zheng, Xiangyu Chen, Yupeng Zheng, Songen Gu, Runyi Yang, Bu Jin, Pengfei Li, Chengliang Zhong, Zengmao Wang, Lina Liu, et al. Gaussiangrasper: 3d language gaussian splatting for open-vocabulary robotic grasping. arXiv preprint arXiv:2403.09637, 2024." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.922, + 0.329 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.246, + 0.073, + 0.322, + 0.085 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.094, + 0.471, + 0.11 + ], + "angle": 0, + "content": "A. Applying Transformation and Scaling to 3D Gaussians" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.115, + 0.491, + 0.145 + ], + "angle": 0, + "content": "This section outlines how to apply transformations (translation, rotation) and scaling to 3D Gaussians." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.146, + 0.492, + 0.252 + ], + "angle": 0, + "content": "The Gaussian primitive typically possesses three core properties: 1) a center position in three-dimensional space; 2) an orientation that specifies the tilt of its principal axes, commonly represented as a quaternion; 3) a scale indicating its width or narrowness. Additionally, Gaussian primitives can be enhanced with Spherical Harmonics (SH) to capture complex, direction-dependent color features." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.252, + 0.492, + 0.358 + ], + "angle": 0, + "content": "When applying a transformation to the Gaussian primitive, the following steps should be taken: 1) update the center position by scaling, rotating, and then adding the translation offset; 2) update the orientation by combining the existing rotation with the new rotation; 3) adjust the scale by multiplying by the scaling factor; 4) rotate the Spherical Harmonics coefficients by using the Wigner D matrices." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.371, + 0.426, + 0.387 + ], + "angle": 0, + "content": "B. Details of Demonstration Augmentation Process" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.392, + 0.491, + 0.437 + ], + "angle": 0, + "content": "We expand on the details of the demonstration augmentation process in this section. An illustration of augmented demonstrations is provided in Fig. 12." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.439, + 0.199, + 0.453 + ], + "angle": 0, + "content": "1) Object pose" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.454, + 0.492, + 0.529 + ], + "angle": 0, + "content": "As mentioned in Sec. IV-B1, we transform the end-effector poses at key frames equivariantly according to the transformation that is applied to the target object. However, considering the symmetry of the gripper, we perform post-processing on the transformed end-effector pose." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.53, + 0.492, + 0.577 + ], + "angle": 0, + "content": "Suppose the rotation of the transformed end-effector pose can be expressed as \\((r_x, r_y, r_z)\\) in the format of XYZ Euler angles. We replace \\(r_z\\) with \\(r_z'\\), which can be calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.594, + 0.389, + 0.651 + ], + "angle": 0, + "content": "\\[\nr _ {z} ^ {\\prime} = \\left\\{ \\begin{array}{l l} r _ {z} & - \\frac {\\pi}{2} \\leqslant r _ {z} \\leqslant \\frac {\\pi}{2} \\\\ r _ {z} + \\pi & r _ {z} < - \\frac {\\pi}{2} \\\\ r _ {z} - \\pi & r _ {z} > \\frac {\\pi}{2}. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.657, + 0.49, + 0.703 + ], + "angle": 0, + "content": "The resulting Euler angles \\((r_x, r_y, r_z')\\) form the final rotation of the end-effector, which prevents the end-effector from performing redundant rotation along its \\(z\\)-axis." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.704, + 0.208, + 0.717 + ], + "angle": 0, + "content": "2) Camera view" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.719, + 0.492, + 0.856 + ], + "angle": 0, + "content": "As aforementioned in Sec. V-D3, we enumerate the hyperparameters of camera view augmentations and their range of randomization in Table III. Suppose the camera view in the expert demonstration has target point \\( O_{c}^{\\mathrm{expert}} = (x_{c}^{0},y_{c}^{0},z_{c}^{0}) \\) and corresponding spherical coordinates \\( (r^0,\\theta^0,\\varphi^0) \\). Thereby, the target point \\( O_{c} = (x_{c},y_{c},z_{c}) \\) and corresponding spherical coordinates \\( (r,\\theta ,\\varphi) \\) are sampled from uniform distributions, ranging between \\( (x_c^0\\pm \\Delta x_c,y_c^0\\pm \\Delta y_c,z_c^0\\pm \\Delta z_c,r^0\\pm \\Delta r,\\theta^0\\pm \\Delta \\theta ,\\varphi^0\\pm \\Delta \\varphi) \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.857, + 0.242, + 0.871 + ], + "angle": 0, + "content": "3) Lighting condition" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.871, + 0.492, + 0.918 + ], + "angle": 0, + "content": "We present the hyperparameters of lighting condition augmentation in this section. First, we normalize the RGB values of each pixel with minimum value 0 and maximum value 1." + }, + { + "type": "table_caption", + "bbox": [ + 0.504, + 0.067, + 0.921, + 0.097 + ], + "angle": 0, + "content": "TABLE III: Camera view augmentation hyperparameters and their range of randomization." + }, + { + "type": "table", + "bbox": [ + 0.614, + 0.104, + 0.809, + 0.231 + ], + "angle": 0, + "content": "
HyperparameterValue
Δxc0.1(m)
Δyc0.1(m)
Δzc0.1(m)
Δr0.2(m)
Δθπ/6
Δφπ/6
" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.26, + 0.921, + 0.291 + ], + "angle": 0, + "content": "Then, we stipulate that the hyperparameters are sampled from the following distributions:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.312, + 0.838, + 0.369 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left(\\Delta_ {r}, \\Delta_ {g}, \\Delta_ {b}\\right) \\sim \\mathcal {N} (\\mathbf {0}, 0. 1 ^ {2} \\mathbf {I}), \\\\ s _ {r}, s _ {g}, s _ {b} \\sim \\text {U n i f o r m} (0. 3, 1. 8), \\\\ o _ {r}, o _ {g}, o _ {b} \\sim \\text {U n i f o r m} (- 0. 3, 0. 3). \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.39, + 0.661, + 0.405 + ], + "angle": 0, + "content": "C. Policy Architecture" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.411, + 0.921, + 0.517 + ], + "angle": 0, + "content": "As illustrated in Fig. 13, the policy processes two types of inputs: images and robot states. We use different encoders to tokenize each modality accordingly. For image inputs, the images are first passed through a ResNet-18 vision encoder to generate visual embeddings. We employ a linear layer to extract compact visual features. For the robot state, we encode it into state tokens using a multi-layer perceptron (MLP)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.518, + 0.92, + 0.608 + ], + "angle": 0, + "content": "The multi-modal encoder in our model is based on a GPT-2 style transformer architecture. Before feeding the sequential image and state tokens into the transformer, we append readout tokens [ACT] to the end. These readout tokens attend to embeddings from different modalities, serving as action latents used for action prediction." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.609, + 0.92, + 0.715 + ], + "angle": 0, + "content": "Encoded by the multi-modal encoder, the action latents generated by the [ACT] tokens are fed into the readout decoders to predict actions. 
The action decoder utilizes an MLP to transform the action latent into the action vector. We predict a chunk of 10 future actions. Compared to single-step action prediction, predicting multiple steps provides temporal action consistency and robustness to idle actions [11]." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.729, + 0.644, + 0.744 + ], + "angle": 0, + "content": "D. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.75, + 0.921, + 0.854 + ], + "angle": 0, + "content": "During training, the input at each timestep consists of two images captured from two eye-on-base cameras, along with the robot state. The robot state includes both the arm state and the gripper state. The gripper state is binary, indicating whether the gripper is open or closed. For the Franka FR3 robot, the arm state is 7-dimensional, while for the UR5e robot, it is 6-dimensional." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.856, + 0.922, + 0.918 + ], + "angle": 0, + "content": "The policy operates with a history length of 1, and the size of the action chunk is set to 10. During inference, we utilize temporal ensemble techniques to compute a weighted average of the multi-step actions." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.064, + 0.879, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.659, + 0.922, + 0.691 + ], + "angle": 0, + "content": "Fig. 12: Illustration of augmented demonstrations. Type of generalization from the top row to the bottom row: object pose, lighting condition, scene appearance, object type, camera view, and embodiment type." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.712, + 0.491, + 0.788 + ], + "angle": 0, + "content": "The policy is trained using a single NVIDIA RTX 4090 GPU, with a batch size of 256 and a learning rate of 1e-4. Depending on the number of demonstrations, the policy is trained for varying numbers of epochs. 
The hyperparameters used during training are detailed in Table IV." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.794, + 0.414, + 0.809 + ], + "angle": 0, + "content": "E. Illustration of Real-World Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.812, + 0.491, + 0.872 + ], + "angle": 0, + "content": "We illustrate the experiment settings on lighting condition generalization in Fig. 14. The flashing light alternates between red and blue light at a frequency of \\(4\\mathrm{Hz}\\). Every lighting condition takes up 6 trials in a single experiment." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.873, + 0.492, + 0.918 + ], + "angle": 0, + "content": "Besides, we present the real-world settings on appearance generalization in Fig. 15. Each scenario accounts for 5 trials in a single experiment." + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.729, + 0.895, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.613, + 0.881, + 0.816, + 0.897 + ], + "angle": 0, + "content": "Fig. 13: Policy architecture." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.337, + 0.088, + 0.66, + 0.104 + ], + "angle": 0, + "content": "TABLE IV: Policy training hyperparameters." + }, + { + "type": "table", + "bbox": [ + 0.33, + 0.111, + 0.663, + 0.464 + ], + "angle": 0, + "content": "
Batch Size256
Learning Rate1e-4
Training Epochs1400 (100 demonstrations)
1000 (200 demonstrations)
800 (400 demonstrations)
700 (800 demonstrations)
500 (1800 demonstrations)
300 (3200 demonstrations)
200 (6400 demonstrations)
Image Size128*128
OptimizerAdamW
History Length1
Action Chunk Length10
" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.523, + 0.368, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.675, + 0.306, + 0.688 + ], + "angle": 0, + "content": "(a) Flashing light (Red)" + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.523, + 0.627, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.675, + 0.566, + 0.688 + ], + "angle": 0, + "content": "(b) Flashing light (Blue)" + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.523, + 0.885, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.719, + 0.675, + 0.796, + 0.688 + ], + "angle": 0, + "content": "(c) Dark light" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.698, + 0.368, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.199, + 0.851, + 0.285, + 0.864 + ], + "angle": 0, + "content": "(d) Bright light" + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.698, + 0.627, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.458, + 0.851, + 0.542, + 0.864 + ], + "angle": 0, + "content": "(e) Green light" + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.698, + 0.885, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.714, + 0.851, + 0.8, + 0.864 + ], + "angle": 0, + "content": "(f) Yellow light" + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.876, + 0.757, + 0.892 + ], + "angle": 0, + "content": "Fig. 14: Illustration of real-world experiment on lighting generalization." 
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.308, + 0.369, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.463, + 0.248, + 0.473 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.308, + 0.627, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.488, + 0.463, + 0.505, + 0.473 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.308, + 0.886, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.463, + 0.763, + 0.473 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.483, + 0.37, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.638, + 0.248, + 0.649 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.483, + 0.627, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.488, + 0.639, + 0.505, + 0.649 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.483, + 0.886, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.638, + 0.763, + 0.649 + ], + "angle": 0, + "content": "(f)" + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.661, + 0.772, + 0.677 + ], + "angle": 0, + "content": "Fig. 15: Illustration of real-world experiment on appearance generalization." 
+ } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_origin.pdf b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fc0f6d36c340c2ff450177b7d54eee1704c1a038 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/af77399d-9fc6-4c89-b068-201a085f200a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f06da677b648d517823d902d0ef060a0ae77f7d8bff1a33e25343a3927859592 +size 17018238 diff --git a/data/2025/2504_13xxx/2504.13175/full.md b/data/2025/2504_13xxx/2504.13175/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4f1d706ab1979bbad0538cbdbdb05f8db94c8be2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/full.md @@ -0,0 +1,557 @@ +# Novel Demonstration Generation with Gaussian Splitting Enables Robust One-Shot Manipulation + +Sizhe Yang\*,1,2 Wenye $\mathrm{Yu}^{*,1,3}$ Jia Zeng $^{1}$ Jun Lv $^{3}$ Kerui Ren $^{1,3}$ + +Cewu Lu $^{3}$ Dahua Lin $^{1,2}$ Jiangmiao Pang $^{1,\dagger}$ + +$^{1}$ Shanghai AI Laboratory $^{2}$ The Chinese University of Hong Kong + +$^{3}$ Shanghai Jiao Tong University + +* Equal contributions † Corresponding author + +Project page: https://yangsizhe.github.io/robosplat/ + +![](images/b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg) +Novel Demonstration Generation + +![](images/e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg) + +![](images/ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg) +Fig. 1: Starting from a single expert demonstration and multi-view images, our method generates diverse and visually realistic data for policy learning, enabling robust performance across six types of generalization in the real world. Compared to previous 2D data augmentation methods, our approach achieves significantly better results across various generalization types. 
Notably, we achieve this within a unified framework. + +![](images/32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg) + +![](images/651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg) + +![](images/f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg) + +![](images/c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg) + +![](images/54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg) +Generalization + +![](images/81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg) + +![](images/5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg) +Training Data Source: + +![](images/ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg) + +![](images/6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg) + +![](images/6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg) + +![](images/bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg) + +![](images/e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg) + +![](images/921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg) +Manually Collected + Previous 2D Augmentation + +![](images/35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg) + +![](images/0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg) + +![](images/cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg) +Manually Collected Ours (Generated) + +Abstract—Visuomotor policies learned from teleoperated demonstrations face challenges such as lengthy data collection, high costs, and limited data diversity. Existing approaches address these issues by augmenting image observations in RGB space or employing Real-to-Sim-to-Real pipelines based on physical simulators. 
However, the former is constrained to 2D data augmentation, while the latter suffers from imprecise physical simulation caused by inaccurate geometric reconstruction. This paper introduces RoboSplat, a novel method that generates diverse, visually realistic demonstrations by directly manipulating 3D Gaussians. Specifically, we reconstruct the scene through 3D Gaussian Splatting (3DGS), directly edit the reconstructed scene, and augment data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types. Comprehensive real-world experiments demonstrate that RoboSplat significantly enhances the generalization of visuomotor policies under diverse disturbances. Notably, while policies trained on hundreds of real-world demonstrations with additional + +2D data augmentation achieve an average success rate of $57.2\%$ , RoboSplat attains $87.8\%$ in one-shot settings across six types of generalization in the real world. + +# I. INTRODUCTION + +Imitation learning for visuomotor policies has emerged as a promising paradigm in robot manipulation. However, policies learned through imitation often display limited robustness in deployment scenarios that differ substantially from expert demonstrations, primarily due to insufficient coverage of visual domains in the training data. Increasing the volume and diversity of real-world data is an effective strategy for enhancing robustness [12]; however, acquiring human-collected demonstrations is prohibitively time-consuming and labor-intensive. Consequently, substantial efforts have been devoted to generating diverse expert data without engaging with real-world environments [68, 69, 49, 8, 10, 67, 9, 35, 50, 59]. 
+ +Simulated environments offer a low-cost platform for data synthesis [49, 69]. However, the Sim-to-Real gap presents + +significant challenges that hinder policy performance in real-world scenarios. Although Real-to-Sim-to-Real pipelines can narrow this gap considerably, replicating real-world manipulation scenes in simulation remains complex and labor-intensive. In particular, inaccuracies in geometric reconstructions often lead to imprecise physical simulations. Moreover, existing Real-to-Sim-to-Real approaches primarily generate data within monotonously reconstructed scenes, resulting in policies that are tailored only to those specific environments. Another line of work sheds light on augmenting image observations for better visual generalization. By editing different semantic parts of the image, these approaches generate novel scene configurations, in terms of background appearances [68, 9, 67, 10], embodiment types [8], object types [67], and camera views [50]. While these image augmentation methods are convenient, their limited consideration of 3D spatial information results in spatially inaccurate data generation. For more effective data augmentation, explicit 3D representations that retain accurate spatial information and are realistically renderable are required. + +Recently, 3D Gaussian Splatting (3DGS) [25] has become a burgeoning approach to superior reconstruction and rendering. Thanks to its explicit representation of the scene, 3DGS enables interpretable editing of the reconstructed scene, which paves the way for generating novel manipulation configurations. Furthermore, as a 3D representation of the scene, 3DGS retains spatial information from the real world and allows for consistent rendering from multiple perspectives, which makes it the real-world counterpart of a simulator's graphics engine for generating novel demonstrations. 
Based on that, we propose RoboSplat, a novel and efficacious approach to demonstration generation with Gaussian Splatting. Empowered by 3DGS, we achieve a high-fidelity reconstruction of the manipulation scene. In order to align the reconstructed scene with real-world counterparts, we devise a novel frame alignment pipeline leveraging differentiable rendering of Gaussian Splatting. 3D Gaussians of different scene components are segmented using off-the-shelf segmentation models and the robot's Unified Robot Description Format (URDF).
Some studies design the policy architecture to endow it with equivariant properties, which is helpful to generalizing to different object poses [60, 61, 43, 13]. One-shot imitation learning approaches like [54, 48, 6, 53, 70] enable the policy to handle various object poses given only one demonstration. Furthermore, some other work focuses on generalizing the policy to different camera views [69, 46, 63], scene appearance [30, 51], and embodiments [12]. Some studies exploit the power of Large Language Models (LLMs) and Vision Language Models (VLMs) to endow robots with generalization abilities [23, 7, 39, 14]. Instead of adopting generalizable policy architecture, auxiliary learning objectives and powerful foundation models, our work is concentrated on generating high-quality, diverse, and realistic data to instill generalization abilities to the learned policy. + +# B. Data Augmentation for Policy Learning + +Given limited training data, data augmentation emerges as a way to improve the robustness of the policy. Previous work adopts image augmentation techniques to improve the resistance of visuomotor policies to observation noises [29, 28, 36, 37, 15, 19, 20]. However, these methods are mainly evaluated in simulated environments. To deploy learned policies in real-world setting, some previous work focuses on augmenting the appearance of the scene by incorporating image-inpainting models [67, 10, 9, 35]. Moreover, Tian et al. [50] generate augmented task demonstrations from different camera views and aim to learn a view-invariant policy. Ameperosa et al. [3]. Chen et al. [8] further devise a cross-embediment pipeline by inpainting different robots to image observations. Nonetheless, these studies mainly augment task demonstrations on 2D images, which lack spatial information. Hence, only limited augmentation can be achieved, and the augmented demonstrations might be unrealistic compared to those generated directly from 3D representations. 
Our work reconstructs the scene with 3D Gaussian Splatting and edits the 3D representation for data augmentation, enabling our policy to achieve comprehensive generalization across object poses, object types, camera views, lighting conditions, scene appearance, and various embodiments.. + +# C. Gaussian Splitting in Robotics + +3D Gaussian Splatting (3DGS) [25] serves as an explicit radiance field representation for real-time rendering of 3D scenes. Previous work leverages 3DGS to select proper grasp poses [24, 71]. Furthermore, Lu et al. [34] exploit 3DGS to + +![](images/47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg) + +![](images/9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg) +Fig. 2: Method overview. We start from a single manually collected demonstration and multi-view images that capture the whole scene. The former provides task-related keyframes, while the latter helps scene reconstruction. After aligning the reconstructed frame with the real-world frame and segmenting different scene components, we carry out autonomous editing of the scene in pursuit of six types of augmentation. + +![](images/58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg) + +construct dynamics of the scene for multi-task robot manipulation. In order to predict the consequence of robots' interactions with the environment, Shorinwa et al. [47] leverage 3D semantic masking and infilling to visualize the motions of the objects that result from the interactions. Another line of work adopts the Real-to-Sim-to-Real pipeline, and utilizes 3DGS to reconstruct the real-world scene [31, 40, 56, 52]. However, importing reconstructed real-world objects to simulation is a strenuous process, and physical interactions tend to suffer from large sim-to-real gaps due to the flawed geometric reconstruction and lack of physical information in 3D reconstruction. 
Some recent work on 3DGS is centered around editing and relighting of the scene [65, 32, 17]. Our method enables autonomous editing of the reconstructed scene to generate diverse demonstrations with various configurations. + +# III. PRELIMINARIES + +3D Gaussian Splatting (3DGS) [25] utilizes multi-view images for high-fidelity scene reconstruction. The scene is represented by a set of Gaussians $\{g_i\}_{i=1}^N$ , where each Gaussian $g_i$ consists of a position vector $\mu_i \in \mathbb{R}^3$ , a rotation matrix $R_i \in \mathbb{R}^{3 \times 3}$ , a scaling matrix $S_i = \text{diag}(s)(s \in \mathbb{R}^3)$ , an opacity factor $\alpha_i \in \mathbb{R}$ , and spherical harmonic coefficients $c_i$ that encapsulate the view-dependent color appearance of the Gaussian. Given the scaling matrix and rotation matrix, the covariance matrix $\Sigma_i$ is calculated as follows: + +$$ +\Sigma_ {i} = R _ {i} S _ {i} S _ {i} ^ {\top} R _ {i} ^ {\top}. +$$ + +To derive the color $C$ of a particular pixel during rendering procedure, 3DGS exploits a typical neural point-based approach, similar to Kopanas et al. [27], where the final color value is calculated as follows: + +$$ +C = \sum_ {i = 1} ^ {N} c _ {i} o _ {i} \prod_ {j = 1} ^ {j = i - 1} (1 - o _ {j}), +$$ + +$$ +o _ {i} = \alpha_ {i} \cdot \exp \left(\frac {1}{2} \delta_ {i} ^ {\intercal} \Sigma_ {i, 2 D} ^ {- 1} \delta_ {i}\right), +$$ + +where $N$ is the number of Gaussians that overlap with the pixel. Besides, $\alpha_{i}$ denotes the opacity of the $i$ -th Gaussian. $\delta_{i} \in \mathbb{R}^{2}$ denotes the offset between the current pixel and the center of the $i$ -th Gaussian projected to 2D image. $\Sigma_{i,2D} \in \mathbb{R}^{2 \times 2}$ stands for the covariance matrix of the $i$ -th Gaussian projected to 2D image. + +# IV. METHODOLOGY + +To generate high-fidelity and diverse data from a single expert trajectory, we present RoboSplat, a novel demonstration generation approach based on 3DGS. 
An overview of our method is shown in Fig. 2. In this section, we describe RoboSplat in detail. We begin with the process of reconstruction and preprocessing in Sec. IV-A, which includes object and scene reconstruction, frame alignment with differentiable rendering, and novel pose generation for the robot and objects. With all the Gaussian models ready, we generate novel demonstrations and perform data augmentation in terms of object + +poses, object types, camera views, scene appearance, lighting conditions, and embodiments, as described in Sec. IV-B. Finally, a visuomotor policy is trained on the augmented demonstrations and directly deployed on real robots, as detailed in Sec. IV-C. + +# A. Reconstruction and Preprocessing + +In pursuit of a high-fidelity reconstruction of the scene, we first capture a set of RGB images whose corresponding viewpoints should be as various as possible. During this process, the scene remains static and the robot is fixed at its default joint configuration, which we refer to as $q_{\mathrm{default}}$ . With the images ready, we utilize COLMAP [45, 44] to obtain a sparse scene reconstruction and an estimation of the camera pose corresponding to each image. To further enhance the reconstruction precision, we gain an depth estimation for each image with Depth Anything [62]. The images, camera poses, and depth prior serve as inputs to 3DGS [25], which returns 3D Gaussians representing the entire scene $\mathcal{G}_{\mathrm{scene}}$ , which contains 3D Gaussians corresponding to the robot, dubbed $\mathcal{G}_{\mathrm{robot}}$ . + +However, the reconstructed 3D Gaussians of the robot are represented in an arbitrary frame $\mathcal{F}_{\mathrm{scene}}$ , and hence we need to align it with the real-world coordinate frame $\mathcal{F}_{\mathrm{real}}$ to facilitate automated editing. + +The robot URDF gives us access to the robot base frame $\mathcal{F}_{\mathrm{URDF}}$ . 
The real-world robot frame $\mathcal{F}_{\mathrm{robot}}$ , $\mathcal{F}_{\mathrm{URDF}}$ , and $\mathcal{F}_{\mathrm{real}}$ are all aligned with each other. Hence, the actual problem turns into the frame alignment from $\mathcal{F}_{\mathrm{scene}}$ to $\mathcal{F}_{\mathrm{URDF}}$ . We denote the transformation matrix as $\mathcal{T}_{\mathrm{URDF, scene}}$ . While point cloud registration approaches, such as Iterative Closest Point (ICP) [5], serve as a common solution to it, we find that there is still major misalignment between the two frames aligned with point cloud registration, as illustrated in Fig. 3. The reason lies in the fact that point cloud registration is based on point coordinates, whereas 3D Gaussians have a scale attribute, which causes a mismatch between point coordinates and the appearance. Therefore, we exploit the differentiable rendering of 3DGS to do further fine-grained alignment, as depicted in Fig. 4. + +Suppose $\hat{\mathcal{T}}_{\mathrm{URDF, scene}}^{0}$ is the initial transformation matrix obtained through ICP. We first apply $\hat{\mathcal{T}}_{\mathrm{URDF, scene}}^{0}$ to $\mathcal{G}_{\mathrm{robot}}$ leading to a partially aligned robot Gaussian $\hat{\mathcal{G}}_{\mathrm{robot}}$ . The aim of further alignment is to derive another transformation matrix $\hat{\mathcal{T}}_{\mathrm{rel}}$ , such that applying $\hat{\mathcal{T}}_{\mathrm{rel}}$ to $\hat{\mathcal{G}}_{\mathrm{robot}}$ gives a better alignment to the pose of the robot defined in URDF. For this sake, we select $N$ canonical camera views to capture the segmentation masks $\{\mathcal{I}_i^{\mathrm{URDF}}\}_{i = 1}^N$ and $\{\mathcal{I}_i^{\mathrm{Gaussian}}\}_{i = 1}^N$ (the pixel value is 1 if it belongs to the robot; otherwise, it is 0) with the robot URDF and $\hat{\mathcal{G}}_{\mathrm{robot}}$ respectively. 
The pixel-wise differences between the images from the same canonical views are averaged to form the objective function of alignment: + +$$ +\mathcal {L} _ {\text {a l i g n}} = \frac {1}{N} \sum_ {i = 1} ^ {N} \left(\mathcal {I} _ {i} ^ {\text {U R D F}} - \mathcal {I} _ {i} ^ {\text {G a u s s i a n}}\right) ^ {2}. +$$ + +Due to the differentiability of Gaussian Splitting, we can + +![](images/1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg) +Fig. 3: Comparison of frame alignment results between ICP and fine-grained optimization with differentiable rendering. The semi-transparent orange overlay represents the ground truth rendered with URDF from the same camera view. The left shows the results of ICP, which have larger errors, while the right shows the results after further fine-grained optimization using differentiable rendering. + +![](images/b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg) +Fig. 4: Illustration of frame alignment with differentiable rendering. The loss is calculated between the mask rendered using Gaussian Splatting and the mask rendered with URDF. Subsequently, backpropagation and gradient descent are used to optimize the translation, rotation, and scale, which are then applied to the 3D Gaussians. + +rewrite the objective function as $\mathcal{L}_{\mathrm{align}}(\hat{T}_{\mathrm{rel}})$ and optimize $\hat{T}_{\mathrm{rel}}$ through gradient descent. The optimized $\hat{T}_{\mathrm{rel}}$ is composed with $\hat{T}_{\mathrm{URDF, scene}}^{0}$ , the result of which is applied to $\mathcal{G}_{\mathrm{scene}}$ to form the scene reconstruction in $\mathcal{F}_{\mathrm{real}}$ . We refer to the aligned 3D Gaussians as $\mathcal{G}_{\mathrm{scene}}^{*}$ . + +In order to decompose the scene into different parts, we first leverage Grounded-SAM [41] to perform task-related object segmentation. Then, the masked images are used to reconstruct 3D Gaussians for the objects. 
The 3D Gaussians corresponding to each link of the robot are segmented using the point cloud of each link in $\mathcal{F}_{\mathrm{URDF}}$ , which can be obtained with the robot's URDF and the renderer. Specifically, if the position of a 3D Gaussian is within a threshold distance from the point cloud of a link, the 3D Gaussian is assigned to that link. If a 3D Gaussian does not belong to any object or any link of the robot, it is classified as background. We suppose that the robot has $l$ links and there are totally $k$ objects in the scene. The reconstructed robot links, objects, and background are denoted as $\mathcal{G}_{\mathrm{robot}}^* = \{\mathcal{G}_{\mathrm{robot},i}^*\}_{i=1}^l$ , $\mathcal{G}_{\mathrm{obj}}^* = \{\mathcal{G}_{\mathrm{obj},j}^*\}_{j=1}^k$ , and $\mathcal{G}_{\mathrm{bg}}^*$ + +respectively. + +Similar to our frame alignment strategy, we utilize differentiable rendering to estimate the deployed camera poses in order to narrow the gap between the generated data and the deployment environment. The camera extrinsics are optimized through gradient descent, with the optimization objective: + +$$ +\mathcal {L} _ {\text {c a m e r a}} = S S I M \left(\mathcal {I} _ {\text {E x p e r t}}, \mathcal {I} _ {\text {G a u s s i a n}}\right) ^ {2}, +$$ + +where $\mathcal{I}_{\mathrm{Expert}}$ denotes the image obtained from the collected expert demonstration, $\mathcal{I}_{\mathrm{Gaussian}}$ represents the rendered image with reconstructed 3D Gaussians, and SSIM refers to Structural Similarity, which measures the perceptual similarity between two images. + +Nonetheless, before moving on to novel demonstration generation, we need to figure out how to generate 3D Gaussians for the robot under novel joint configurations. To achieve that, we leverage the link-wise Gaussians $\{\mathcal{G}_{\mathrm{robot},i}^{*}\}_{i = 1}^{l}$ and the default joint configuration $q_{\mathrm{default}}$ . 
For each link $1 \leqslant i \leqslant l$ , we access its relative pose to robot base frame under arbitrary joint configuration $q$ through forward kinematics, denoted as $\mathcal{T}_{\mathrm{fk}}^i(q)$ . Hence, by transforming each link $i$ with $\mathcal{T}_{\mathrm{fk}}^i(q)\mathcal{T}_{\mathrm{fk}}^i(q_{\mathrm{default}})^{-1}$ , we derive the corresponding 3D Gaussians under configuration $q$ . The entire 3D Gaussians are thereby derived by composing Gaussians of all $l$ links. As for the manipulated objects, we apply transformations in a similar manner. The way 3D Gaussians are transformed is detailed in Appendix A. + +# B. Novel Demonstration Generation + +Utilizing 3D Gaussians in $\mathcal{F}_{\mathrm{real}}$ , we implement our demonstration augmentation process, which systematically enhances the expert demonstration $\mathcal{D}_{\mathrm{expert}}$ across six aspects: object poses, object types, camera views, embodiment types, scene appearance, and lighting conditions. + +# 1) Object Pose + +To perform object pose augmentation, we first extract keyframes from the expert demonstration using a heuristic approach. Whenever the gripper action toggles or joint velocities approach zero, we consider the current time step as a keyframe and record the end-effector pose with respect to robot base frame. After that, we apply rigid transformations to the target objects that are involved in the expert demonstration. The end-effector poses at keyframes are transformed equivariantly according to the target object. Eventually, we generate trajectories between consecutive keyframe poses with motion planning, the combination of which makes a complete augmented demonstration with novel object poses. + +# 2) Object Type + +The object types can be augmented with 3D Content Generation. We first prompt GPT-4 [2] to generate approximately 50 names of objects that can be grasped. 
Then, we use these object names as prompts to generate corresponding 3D Gaussians with a 3D content generation model [57]. We utilize an off-the-shelf grasping algorithm [16] to generate grasp poses with respect to the object frame. As we generate different object poses for augmentation, we obtain the corresponding + +end-effector poses by composing object pose and the grasp pose relative to the object, which turn into the keyframe poses in new demonstrations. The entire augmented trajectory is generated in the same manner as IV-B1. + +# 3) Camera View + +One merit of 3DGS lies in its ability to perform novel view synthesis. Thereby, we are able to choose different camera poses from $\mathcal{D}_{\mathrm{expert}}$ and obtain novel-view demonstrations. Although we can render novel-view observations from arbitrary camera pose, we need to ensure that the augmented camera view does not deviate so much from the expert that it loses sight of the manipulation scene. Hence, we first designate a target point $O_{c} = (x_{c},y_{c},z_{c})$ in $\mathcal{F}_{\mathrm{real}}$ , towards which the camera should face during the entire episode. We then define a coordinate frame $\mathcal{F}_c$ , whose origin is $O_{c}$ and orientation is the same as $\mathcal{F}_{\mathrm{real}}$ . The position of camera is represented by spherical coordinates $(r,\theta ,\varphi)$ in $\mathcal{F}_c$ . Thus, by limiting the target point within the manipulation scene and randomizing the spherical coordinates, we are able to generate camera poses that produce meaningful observations yet possess diversity. The hyperparameters of randomization for the target point and the spherical coordinates are detailed in Appendix B. 
+ +# 4) Embodiment Type + +To generalize the expert demonstration to different types of robots, we replace $\mathcal{G}_{\mathrm{robot}}^*$ with the 3D Gaussians of another embodiment, dubbed $\mathcal{G}_{\mathrm{robot}}^{\mathrm{new}}$ , which is attained from the corresponding URDF file or real-world reconstruction. The keyframe end-effector poses are reused because they are embodiment-agnostic action representations. Hence, through motion planning, we can easily derive the end-effector poses and joint positions of the new embodiment for all time steps in augmented demonstrations. The 3D Gaussians of the new embodiment under novel joint configurations is obtained from $\mathcal{G}_{\mathrm{robot}}^{\mathrm{new}}$ as mentioned in Sec. IV-A. The policy trained on these augmented demonstrations is directly deployed on novel embodiments. + +# 5) Scene Appearance + +Inconsistency between scene appearance accounts for a large visual gap between training and deployment environments. To resolve this issue, we propose to exploit reconstructed diverse 3D scenes and also large-scale image datasets to augment the scene appearance. We adopt COCO [33] as the image dataset, and attach images to the table top and background 3D Gaussian planes that surround the entire manipulation scene. Moreover, we gather datasets for 3D reconstruction [22, 66, 26, 4], and derive corresponding 3D Gaussians by 3DGS training. The resulting 3D Gaussian scenes substitute for $\mathcal{G}_{\mathrm{bg}}^*$ , forming novel scene appearance for data augmentation. The edge of utilizing reconstructed 3D scenes is their consistent and diverse geometry across multiple camera views, which helps produce more realistic demonstrations. Nevertheless, due to the expense of 3DGS training on large-scale reconstruction datasets, we complement them with 2D images for greater appearance diversity. 
+ +# 6) Lighting Condition + +Discrepancy in lighting conditions is another barrier to deploying trained policy in unseen scenarios. To compensate for that, we augment the diffuse color of each Gaussian in the reconstructed scene through random scaling, offset, and noise. Concretely, for a Gaussian with original diffuse color $(r,g,b)$ , the augmented diffuse color values can be expressed as $(s_r r + o_r + \Delta_r, s_g g + o_g + \Delta_g, s_b b + o_b + \Delta_b)$ , where $(s_r, s_g, s_b)$ stand for scaling factors, $(o_r, o_g, o_b)$ stand for offsets, and $(\Delta_r, \Delta_g, \Delta_b)$ stand for random Gaussian noise. The scaling factors and offsets simulate changes in color contrast and scene brightness. Thus, they are shared among all the Gaussians in the scene. On the other hand, the random Gaussian noise is sampled independently for each Gaussian to simulate noise in images captured by cameras. The details of scaling factors, offsets, and Gaussian noise are elaborated in Appendix B. + +An illustration of augmented demonstrations with six types of generalizations can be found in Appendix B. + +# C. Policy Training + +We employ a modern, widely adopted transformer-based architecture [18, 51, 38, 55] to serve as the policy network, which is detailed in Appendix C. We process RGB images with ResNet-18 [21], and encode joint state using a multilayer perceptron (MLP). The latent of images and robot state is fed into a transformer encoder. Finally, an action decoder utilizes an MLP to convert the action latent into the action vector $a_{t}$ . The policy is trained with Behavioural Cloning (BC) in an end-to-end manner, aiming to maximize the likelihood of expert actions in demonstrations. We denote $o_k \triangleq (I_k, q_k)$ as the observation at the $k$ -th frame of demonstrations $\mathcal{D}$ , and $\pi$ as our policy. 
The loss function can then be expressed as + +$$ +\mathcal {L} ^ {\mathrm {B C}} = \mathbb {E} _ {(o _ {k}, a _ {k}) \sim \mathcal {D}} \| a _ {k} - \pi (o _ {k}) \| ^ {2}. +$$ + +Specifically, $I_{k}$ consists of two images from different eye-on-base cameras. We adopt relative end-effector pose as the action representation, which depicts the relative transformation between two consecutive end-effector poses under robot base frame. Further details of the training process can be found in Appendix D. + +# V. EXPERIMENTS + +We conduct comprehensive experiments in the real world to verify the effectiveness of our demonstration generation pipeline. Specifically, we aim to answer: given a single expert demonstration and multi-view images of the scene, + +1) How efficient is data generation compared to manually collecting data? +2) How does the policy trained on generated demonstrations perform across various tasks compared to that trained on manually collected data? +3) How does the policy perform as the generated data scale up? +4) Can generated demonstrations enhance the robustness of the policy when facing various deployment settings, such + +![](images/dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg) +Fig. 5: Real-world experiment setup. We employ a Franka Research 3 Robot and two eye-on-base RealSense D435i cameras. + +as changes in object types, camera views, scene appearance, lighting conditions, and embodiment types? + +# A. Experimental Setup + +The real-world experiment setup is presented in Fig. 5. Concretely, we collect the expert demonstration on Franka Research 3 (FR3) Robot. Two Intel Realsense D435i eye-on-base cameras are mounted on the table top, capturing RGB image observations for the policy. We employ a 3D SpaceMouse to collect teleoperated demonstrations at a frequency of $10\mathrm{Hz}$ . Policy inference is carried out on an NVIDIA RTX4090 GPU, with a latency of 0.1s imposed. 
+ +In order to manifest the generalization ability of our pipeline to different task settings, we select five tasks for evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place, and Sweep. + +In Pick Object task, the policy picks up a target object which is placed at different poses within a $30\mathrm{cm}\times 40\mathrm{cm}$ workspace. In CloseDrawer task, the policy closes a drawer whose position is constrained to a $15\mathrm{cm}\times 40\mathrm{cm}$ workspace, while its rotation about the z-axis is restricted to $\left[-\frac{\pi}{8},\frac{\pi}{8}\right]$ . In Pick-Place-Close task, the policy is expected to grasp an object, place it in the drawer, and then close the drawer. The drawer is placed in a $5\mathrm{cm}\times 5\mathrm{cm}$ workspace, with a fixed orientation. The target object is located in a $10\mathrm{cm}\times 10\mathrm{cm}$ workspace, whose rotation falls into range $\left[-\frac{\pi}{8},\frac{\pi}{8}\right]$ . In Dual Pick-Place task, the policy attempts to pick two target objects in a row and place them in a fixed drawer. Both of the objects are located in $10\mathrm{cm}\times 10\mathrm{cm}$ workspaces, with yaw angles between $-\frac{\pi}{8}$ and $\frac{\pi}{8}$ . In Sweep task, the robot should first pick up a broom and then sweeps the chocolate beans into a dustpan. The broom is randomly placed within a $10\mathrm{cm}\times 10\mathrm{cm}$ area, and the chocolate beans are randomly placed on the chopping board. Task setups are illustrated in Fig. 6. These five tasks require proficiency in executing basic pick-and-place actions, manipulating articulated objects, performing long-horizon tasks, and demonstrating skills involving tool use and + +![](images/53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg) +Fig. 6: Task illustration. We design five manipulation tasks for real-world evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place and Sweep, whose details are elaborated in Sec. V-A. 
+ +![](images/b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg) + +![](images/8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg) + +![](images/056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg) + +![](images/60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg) + +functional motion. Together, they provide a comprehensive evaluation across various task settings. + +We also conduct extensive real-world experiments to prove the effectiveness of our data generation pipeline in terms of different types of generalization. Notably, the evaluation of object pose generalization is incorporated into all experiments, including those focused on the other five types of generalization (object types, camera views, embodiment types, lighting conditions, and scene appearance). This is because object pose generalization is a fundamental requirement for task completion ability. For the other five types of generalization, the details are provided in Sec. V-D. Success rate (SR) is chosen as the evaluation metric in all experiments. Each policy is evaluated with 30 trials for a certain evaluation setting. + +# B. Efficiency of Augmenting Demonstrations + +To answer Question 1, we need to justify that our pipeline is economical with both labor and time when generating data. The labor-saving property is obvious because demonstrations are generated automatically in our pipeline. We compare the average time consumption of manually collecting a real-world demonstration to that of generating a demonstration through our pipeline. Specifically, we adopt eight processes on an NVIDIA RTX 4090 GPU for paralleled data generation to efficiently utilize computational resources. + +The comparison study is conducted on all five tasks, and the result is shown in Table I. 
Our data generation pipeline, executed on a single GPU, is more than 29 times faster than collecting data in the real world, with an average time consumption of 0.64s across all five tasks.
We compare policies trained on real-world data, real-world data augmented using 2D augmentation approaches, and data generated via our pipeline. An illustration of the experiments for different generalization types is shown in Fig. 8. + +# 1) Lighting Condition + +To demonstrate the effectiveness of lighting augmentation in our approach, we adopt five different scenarios for policy deployment, which are shown in Appendix E. We compare the performance of four policies that are trained respectively on: + +1) 200 real-world demonstrations (Collected); +2) 1800 generated demonstrations with only object pose augmentation, which are the same as data used in V-C (Ours Pose-Only); +3) real-world demonstrations augmented with color jitter (Color Jitter); +4) 3200 demonstrations generated by our pipeline with both lighting condition and object pose augmentation (Ours). + +As shown in Fig. 9, policies trained on augmented lighting conditions achieve an average of over $80\%$ success rate across Pick Object, Close Driver, and Pick-Place-Close tasks, with an overall improvement over those trained on real-world data without augmentation by $70\%$ . Furthermore, our policies show a significant edge over those trained on generated demonstrations with augmented object poses and real-world demonstrations augmented with color jitter, justifying the validity of lighting augmentation in our pipeline. + +TABLE I: Comparison of demonstration collection time (s). We calculate the average time cost of data collection of a single demonstration over 100 demonstrations. Our method achieves more than 29 times the speed compared to the baseline. + +
Task TypePick ObjectClose PrinterPick-Place-PrintDual Pick-PlaceSweepAverage
Real-world13.210.124.727.020.419.1
Ours0.430.340.861.00.580.64
+ +![](images/7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg) + +![](images/ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg) + +![](images/0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg) + +![](images/d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg) +Fig. 7: Main results. Top left: We present the average success rate across five tasks. Our method shows promising scalability as the number of demonstration grows. The other five subfigures: For each task, we evaluate the success rate of policies trained from manually collected data and those generated by our method over 30 trials, using different number of demonstrations. + +![](images/c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg) + +![](images/204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg) + +# 2) Scene Appearance + +Similar to the experiment on lighting conditions, we select five different scenarios for evaluation on scene appearance augmentation, which is illustrated in Appendix E. The four policies for comparison are trained in a similar manner as described in Sec. V-D1, with the key difference being that we employ image inpainting methods [68, 9, 67, 10] as more robust and suitable 2D augmentation baselines for appearance generalization. The results are shown in Fig. 9. The policy trained on data generated through our pipeline, incorporating both appearance and object pose augmentations, achieves superior performance compared to all baselines. Notably, it demonstrates over a $70\%$ increase in success rates across all three tasks when compared to policies trained on data without appearance augmentation. In particular, our policy achieves $100\%$ success rate on the Pick Object task, showcasing strong robustness against various background appearance. + +# 3) Camera View + +We employ two different settings for camera view generalization: novel view and moving view. 
In novel view experiments, we select 30 poses for each camera, which are different from the training perspective. On the other hand, cameras are kept moving in moving view experiments. Similar to Sec. V-D1 and Sec. V-D2, we compare the performance of four policies that are trained respectively on: + +1) 200 real-world demonstrations (Collected); +2) 1800 generated demonstrations with only object pose augmentation (Ours Pose-Only); +3) 3200 demonstrations stemmed from 200 real-world + +demonstrations, augmented using VISTA [50], which leverages novel view synthesis models to augment data from different views; + +4) 3200 generated demonstrations with camera view augmentation (Ours). + +We present the results in Table II. Our policy is able to perform Pick Object task and Pick-Place-Close task with success rates of over $80\%$ and $50\%$ respectively, while the policies trained on data without augmentation can barely accomplish the task. Our approach also outperforms VISTA by a large margin. Notably, our policy achieves nearly $100\%$ success rate on CloseDrawer task, manifesting strong robustness against novel camera views and moving cameras. + +# 4) Object Type + +In order to demonstrate the effectiveness of our method in augmenting object types, we compare the performance of three different policies that are respectively trained on: + +1) 400 real-world demonstrations with 5 real-world objects (Collected); +2) 6400 demonstrations stemmed from 200 real-world demonstrations, augmented using ROSIE [67], which utilizes image inpainting models to generate data with unseen objects; +3) 6400 demonstrations generated by our pipeline with object type augmentation (Ours). + +During deployment, we select five real-word objects that are different from all the objects covered in training process. We report the result in Fig. 10. The policy trained on 50 object + +![](images/5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg) +Fig. 
8: Illustration of real-world experiments for different generalization types. The data is collected in the original setting. When deploying the trained policy, we modify object poses, lighting conditions, scene appearance, camera views, object types, and embodiments to evaluate the robustness in different scenarios. + +![](images/2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg) +Fig. 9: Performance when changing lighting conditions and appearance. We report the success rate of different policies under various lighting conditions and appearance. The policies trained with generated demonstrations with corresponding augmentations manifest remarkable advance compared to baseline policies. + +![](images/593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg) + +types showcases better adaptability to novel object types, improving the success rate of baseline models by over $40\%$ . This demonstrates the effectiveness of our data generation pipeline in utilizing off-the-shelf 3D Content Generation models to generalize policy to novel objects. + +![](images/b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg) +Fig. 10: Performance on novel object types. The policy trained on data generated by RoboSplat shows a salient edge over baseline policies. + +# 5) Embodiment Type + +Our method supports generating demonstrations across different embodiment types as mentioned in Sec. IV-B4. To prove that, based on one demonstration collected with the Franka Research 3, we generate novel demonstrations for a UR5e robot equipped with a Robotiq 2F-85 gripper and deploy the learned policy directly in the real world. It is worth noting that policies trained on Franka Research 3 robot demonstrations fail to be deployed on UR5e robot due to frequent safety violations. We compare the performance of policies trained on embodiment-augmented demonstrations with those trained on data augmented using RoVi-Aug [8]. 
RoVi-Aug modifies real-world demonstrations by replacing the appearance of the embodiment through generative models. + +We present the performance of policies in Fig. 11. Policies trained on data generated using our pipeline achieve a success + +TABLE II: Performance when changing camera view. We compare the success rate of different policies under two circumstances: novel camera view and moving camera view. The policies trained on demonstrations augmented using our approach showcase significant improvement over baseline policies. + +
Data SourcePick ObjectClose PrinterPick-Place-CloseAverage
Novel ViewMoving ViewNovel ViewMoving ViewNovel ViewMoving View
Collected6.70.016.713.30.00.06.1
Ours Pose-Only0.00.026.730.00.00.09.5
VISTA [50]33.333.356.770.033.316.740.6
Ours90.086.7100.096.753.356.780.6
+ +rate close to $100\%$ on an embodiment different from the one used for demonstration collection. This result highlights its superior performance compared to the baseline in cross-embodiment transfer. + +![](images/f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg) +Fig. 11: Performance on cross embodiment experiments. We evaluate the learned policy directly on the UR5e robot and achieve a nearly $100\%$ success rate that surpasses the 2D augmentation methods. + +# VI. LIMITATIONS + +Due to the limitations of naive 3D Gaussian Splatting, it is incapable of handling deformable objects. Additionally, the pipeline lacks physical constraints, making it unsuitable for contact-rich and dynamic tasks. However, recent advancements in Gaussian Splatting [58, 1, 64, 42] provide promising opportunities to address these challenges. Future work could apply these techniques to generate data for a wider range of tasks. + +# VII. CONCLUSION + +In this work, we introduce RoboSplat, a novel demonstration generation approach that requires only a single collected demonstration and generates diverse and high-quality data for policy learning. Comprehensive real-world experiments show that our approach significantly enhances the robustness of visuomotor policies when encountering various disturbances. + +# ACKNOWLEDGMENTS + +We sincerely thank Yang Tian and Xiao Chen for their fruitful discussions. This work is supported by the National Key R&D Program of China (2022ZD0160201), Shanghai Artificial Intelligence Laboratory, and China Postdoctoral Science Foundation (2023M741848). + +# REFERENCES + +[1] Jad Abou-Chakra, Krishan Rana, Feras Dayoub, and Niko Suenderhauf. Physically embodied gaussian splatt-ting: A visually learnt and physically grounded 3d representation for robotics. In 8th Annual Conference on Robot Learning, 2024. 
+[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +[3] Ezra Ameperosa, Jeremy A Collins, Mrinal Jain, and Animesh Garg. Rocoda: Counterfactual data augmentation for data-efficient robot learning from demonstrations. arXiv preprint arXiv:2411.16959, 2024. +[4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5470–5479, 2022. +[5] Paul J Besl and Neil D McKay. Method for registration of 3-d shapes. In Sensor fusion IV: control paradigms and data structures, volume 1611, pages 586-606. Spie, 1992. +[6] Ondrej Biza, Skye Thompson, Kishore Reddy Pagidi, Abhinav Kumar, Elise van der Pol, Robin Walters, Thomas Kipf, Jan-Willem van de Meent, Lawson LS Wong, and Robert Platt. One-shot imitation learning via interaction warping. arXiv preprint arXiv:2306.12392, 2023. +[7] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023. +[8] Lawrence Yunliang Chen, Chenfeng Xu, Karthik Dharmarajan, Muhammad Zubair Irshad, Richard Cheng, Kurt Keutzer, Masayoshi Tomizuka, Quan Vuong, and Ken Goldberg. Rovi-aug: Robot and viewpoint augmentation for cross-embodiment robot learning. arXiv preprint arXiv:2409.03403, 2024. +[9] Zoey Chen, Sho Kiami, Abhishek Gupta, and Vikash + +Kumar. Genaug: Retargeting behaviors to unseen situations via generative augmentation. arXiv preprint arXiv:2302.06671, 2023. +[10] Zoey Chen, Zhao Mandi, Homanga Bharadhwaj, Mohit Sharma, Shuran Song, Abhishek Gupta, and Vikash Kumar. 
Semantically controllable augmentations for generalizable robot learning. The International Journal of Robotics Research, page 02783649241273686, 2024. +[11] Cheng Chi, Zhenjia Xu, Siyuan Feng, Eric Cousineau, Yilun Du, Benjamin Burchfiel, Russ Tedrake, and Shuran Song. Diffusion policy: Visuomotor policy learning via action diffusion. The International Journal of Robotics Research, page 02783649241273668, 2023. +[12] Cheng Chi, Zhenjia Xu, Chuer Pan, Eric Cousineau, Benjamin Burchfiel, Siyuan Feng, Russ Tedrake, and Shuran Song. Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots. arXiv preprint arXiv:2402.10329, 2024. +[13] Ethan Chun, Yilun Du, Anthony Simeonov, Tomas Lozano-Perez, and Leslie Kaelbling. Local neural descriptor fields: Locally conditioned object representations for manipulation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 1830-1836. IEEE, 2023. +[14] Murtaza Dalal, Min Liu, Walter Talbott, Chen Chen, Deepak Pathak, Jian Zhang, and Ruslan Salakhutdinov. Local policies enable zero-shot long-horizon manipulation. arXiv preprint arXiv:2410.22332, 2024. +[15] Linxi Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, and Anima Anandkumar. Secant: Self-expert cloning for zero-shot generalization of visual policies. arXiv preprint arXiv:2106.09678, 2021. +[16] Hao-Shu Fang, Chenxi Wang, Hongjie Fang, Minghao Gou, Jirong Liu, Hengxu Yan, Wenhai Liu, Yichen Xie, and Cewu Lu. Anygrasp: Robust and efficient grasp perception in spatial and temporal domains. IEEE Transactions on Robotics, 2023. +[17] Jian Gao, Chun Gu, Youtian Lin, Zhihao Li, Hao Zhu, Xun Cao, Li Zhang, and Yao Yao. Relightable 3d gaussians: Realistic point cloud relighting with brdf decomposition and ray tracing. In European Conference on Computer Vision, pages 73-89. Springer, 2025. +[18] Siddhant Haldar, Zhuoran Peng, and Lerrel Pinto. Baku: An efficient transformer for multi-task policy learning. 
arXiv preprint arXiv:2406.07539, 2024. +[19] Nicklas Hansen and Xiaolong Wang. Generalization in reinforcement learning by soft data augmentation. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13611-13617. IEEE, 2021. +[20] Nicklas Hansen, Hao Su, and Xiaolong Wang. Stabilizing deep q-learning with convnets and vision transformers under data augmentation. Advances in neural information processing systems, 34:3680-3693, 2021. +[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision + +and pattern recognition, pages 770-778, 2016. +[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (ToG), 37(6):1-15, 2018. +[23] Alex Irpan, Alexander Herzog, Alexander Toshkov Toshev, Andy Zeng, Anthony Brohan, Brian Andrew Ichter, Byron David, Carolina Parada, Chelsea Finn, Clayton Tan, et al. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, number 2022, 2022. +[24] Mazeyu Ji, Ri-Zhao Qiu, Xueyan Zou, and Xiaolong Wang. Graspsplats: Efficient manipulation with 3d feature splatting. arXiv preprint arXiv:2409.02084, 2024. +[25] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023. +[26] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017. +[27] Georgios Kopanas, Thomas Leimkuhler, Gilles Rainer, Clément Jambon, and George Drettakis. Neural point catauastics for novel-view synthesis of reflections. ACM Transactions on Graphics (TOG), 41(6):1-15, 2022. +[28] Ilya Kostrikov, Denis Yarats, and Rob Fergus. 
Image augmentation is all you need: Regularizing deep reinforcement learning from pixels. arXiv preprint arXiv:2004.13649, 2020. +[29] Misha Laskin, Kimin Lee, Adam Stooke, Lerrel Pinto, Pieter Abbeel, and Aravind Srinivas. Reinforcement learning with augmented data. Advances in neural information processing systems, 33:19884-19895, 2020. +[30] Mara Levy, Siddhant Haldar, Lerrel Pinto, and Abhinav Shirivastava. P3-po: Prescriptive point priors for visuospatial generalization of robot policies. arXiv preprint arXiv:2412.06784, 2024. +[31] Xinhai Li, Jialin Li, Ziheng Zhang, Rui Zhang, Fan Jia, Tiancai Wang, Haoqiang Fan, Kuo-Kun Tseng, and Ruiping Wang. Robogsim: A real2sim2real robotic gaussian splatting simulator. arXiv preprint arXiv:2411.11839, 2024. +[32] Zhihao Liang, Qi Zhang, Ying Feng, Ying Shan, and Kui Jia. Gs-ir: 3d gaussian splatting for inverse rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21644–21653, 2024. +[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. +[34] Guanxing Lu, Shiyi Zhang, Ziwei Wang, Changliu Liu, + +Jiwen Lu, and Yansong Tang. Manigaussian: Dynamic gaussian splatting for multi-task robotic manipulation. In European Conference on Computer Vision, pages 349-366. Springer, 2025. +[35] Zhao Mandi, Homanga Bharadhwaj, Vincent Moens, Shuran Song, Aravind Rajeswaran, and Vikash Kumar. Cacti: A framework for scalable multi-task multi-scene visual imitation learning. arXiv preprint arXiv:2212.05711, 2022. +[36] Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Rohun Kulkarni, Li Fei-Fei, Silvio Savarese, Yuke Zhu, and Roberto Martin-Martín. 
What matters in learning from offline human demonstrations for robot manipulation. arXiv preprint arXiv:2108.03298, 2021. +[37] Ajay Mandlekar, Soroush Nasiriany, Bowen Wen, Iretiayo Akinola, Yashraj Narang, Linxi Fan, Yuke Zhu, and Dieter Fox. Mimicgen: A data generation system for scalable robot learning using human demonstrations. arXiv preprint arXiv:2310.17596, 2023. +[38] Octo Model Team, Dibya Ghosh, Homer Walke, Karl Pertsch, Kevin Black, Oier Mees, Sudeep Dasari, Joel Hejna, Charles Xu, Jianlan Luo, Tobias Kreiman, You Liang Tan, Lawrence Yunliang Chen, Pannag Sanketi, Quan Vuong, Ted Xiao, Dorsa Sadigh, Chelsea Finn, and Sergey Levine. Octo: An open-source generalist robot policy. In Proceedings of Robotics: Science and Systems, Delft, Netherlands, 2024. +[39] Abby O'Neill, Abdul Rehman, Abhinav Gupta, Abhiram Maddukuri, Abhishek Gupta, Abhishek Padalkar, Abraham Lee, Acorn Pooley, Agrim Gupta, Ajay Mandlekar, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023. +[40] Mohammad Nomaan Qureshi, Sparsh Garg, Francisco Yandun, David Held, George Kantor, and Abhisesh Silwal. Splatsim: Zero-shot sim2real transfer of rgb manipulation policies using gaussian splatting. arXiv preprint arXiv:2409.10161, 2024. +[41] Tianhe Ren, Shilong Liu, Ailing Zeng, Jing Lin, Kun-chang Li, He Cao, Jiayu Chen, Xinyu Huang, Yukang Chen, Feng Yan, et al. Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159, 2024. +[42] Boxiang Rong, Artur Grigorev, Wenbo Wang, Michael J Black, Bernhard Thomaszewski, Christina Tsalicoglou, and Otmar Hilliges. Gaussian garments: Reconstructing simulation-ready clothing with photorealistic appearance from multi-view video. arXiv preprint arXiv:2409.08189, 2024. +[43] Hyunwoo Ryu, Hong-in Lee, Jeong-Hoon Lee, and Jongeun Choi. Equivariant descriptor fields: Se (3)-equivariant energy-based models for end-to-end visual robotic manipulation learning. 
arXiv preprint arXiv:2206.08321, 2022. +[44] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on + +Computer Vision and Pattern Recognition (CVPR), 2016. +[45] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. +[46] Younggyo Seo, Junsu Kim, Stephen James, Kimin Lee, Jinwoo Shin, and Pieter Abbeel. Multi-view masked world models for visual robotic manipulation. In International Conference on Machine Learning, pages 30613-30632. PMLR, 2023. +[47] Ola Shorinwa, Johnathan Tucker, Aliyah Smith, Aiden Swann, Timothy Chen, Roya Firoozi, Monroe Kennedy III, and Mac Schwager. Splat-mover: Multi-stage, open-vocabulary robotic manipulation via editable gaussian splatting. arXiv preprint arXiv:2405.04378, 2024. +[48] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. In 2022 International Conference on Robotics and Automation (ICRA), pages 6394-6400. IEEE, 2022. +[49] Ritvik Singh, Arthur Allshire, Ankur Handa, Nathan Ratliff, and Karl Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024. +[50] Stephen Tian, Blake Wulfe, Kyle Sargent, Katherine Liu, Sergey Zakharov, Vitor Guizilini, and Jiajun Wu. View-invariant policy learning via zero-shot novel view synthesis. arXiv preprint arXiv:2409.03685, 2024. +[51] Yang Tian, Sizhe Yang, Jia Zeng, Ping Wang, Dahua Lin, Hao Dong, and Jiangmiao Pang. Predictive inverse dynamics models are scalable learners for robotic manipulation. arXiv preprint arXiv:2412.15109, 2024. +[52] Marcel Torne, Anthony Simeonov, Zechu Li, April Chan, Tao Chen, Abhishek Gupta, and Pulkit Agrawal. 
Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. arXiv preprint arXiv:2403.03949, 2024. +[53] Pietro Vitiello, Kamil Dreczkowski, and Edward Johns. One-shot imitation learning: A pose estimation perspective. arXiv preprint arXiv:2310.12077, 2023. +[54] Vitalis Vosylius and Edward Johns. Instant policy: Incontext imitation learning via graph diffusion. arXiv preprint arXiv:2411.12633, 2024. +[55] Hongtao Wu, Ya Jing, Chilam Cheang, Guangzeng Chen, Jiafeng Xu, Xinghang Li, Minghuan Liu, Hang Li, and Tao Kong. Unleashing large-scale video generative pretraining for visual robot manipulation, 2023. +[56] Yuxuan Wu, Lei Pan, Wenhua Wu, Guangming Wang, Yanzi Miao, and Hesheng Wang. Rl-gsbridge: 3d gaussian splatting based real2sim2real method for robotic manipulation learning. arXiv preprint arXiv:2409.20291, 2024. +[57] Jianfeng Xiang, Zelong Lv, Sicheng Xu, Yu Deng, Ruicheng Wang, Bowen Zhang, Dong Chen, Xin Tong, + +and Jiaolong Yang. Structured 3d latents for scalable and versatile 3d generation. arXiv preprint arXiv:2412.01506, 2024. +[58] Tianyi Xie, Zeshun Zong, Yuxing Qiu, Xuan Li, Yutao Feng, Yin Yang, and Chenfanfu Jiang. Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4389-4398, 2024. +[59] Zhengrong Xue, Shuying Deng, Zhenyang Chen, Yixuan Wang, Zhecheng Yuan, and Huazhe Xu. Demogen: Synthetic demonstration generation for data-efficient visuomotor policy learning. arXiv preprint arXiv:2502.16932, 2025. +[60] Jingyun Yang, Zi-ang Cao, Congyue Deng, Rika Antonova, Shuran Song, and Jeannette Bohg. Equibot: Sim (3)-equivariant diffusion policy for generalizable and data efficient learning. arXiv preprint arXiv:2407.01479, 2024. +[61] Jingyun Yang, Congyue Deng, Jimmy Wu, Rika Antonova, Leonidas Guibas, and Jeannette Bohg. 
Equiv-act: Sim (3)-equivariant visuomotor policies beyond rigid object manipulation. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 9249–9255. IEEE, 2024. +[62] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024. +[63] Sizhe Yang, Yanjie Ze, and Huazhe Xu. Movie: Visual model-based policy adaptation for view generalization. Advances in Neural Information Processing Systems, 36, 2024. +[64] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20331-20341, 2024. +[65] Mingqiao Ye, Martin Danelljan, Fisher Yu, and Lei Ke. Gaussian grouping: Segment and edit anything in 3d scenes. In European Conference on Computer Vision, pages 162-179. Springer, 2025. +[66] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023. +[67] Tianhe Yu, Ted Xiao, Austin Stone, Jonathan Tompson, Anthony Brohan, Su Wang, Jaspiar Singh, Clayton Tan, Jodilyn Peralta, Brian Ichter, et al. Scaling robot learning with semantically imagined experience. arXiv preprint arXiv:2302.11550, 2023. +[68] Chengbo Yuan, Suraj Joshi, Shaoting Zhu, Hang Su, + +Hang Zhao, and Yang Gao. Roboengine: Plug-and-play robot data augmentation with semantic robot segmentation and background generation. arXiv preprint arXiv:2503.18738, 2025. +[69] Zhecheng Yuan, Tianming Wei, Shuiqi Cheng, Gu Zhang, Yuanpei Chen, and Huazhe Xu. Learning to manipulate anywhere: A visual generalizable framework for reinforcement learning. 
arXiv preprint arXiv:2407.15815, 2024. +[70] Xinyu Zhang and Abdeslam Boullarias. One-shot imitation learning with invariance matching for robotic manipulation. arXiv preprint arXiv:2405.13178, 2024. +[71] Yuhang Zheng, Xiangyu Chen, Yupeng Zheng, Songen Gu, Runyi Yang, Bu Jin, Pengfei Li, Chengliang Zhong, Zengmao Wang, Lina Liu, et al. Gaussiangrasper: 3d language gaussian splatting for open-vocabulary robotic grasping. arXiv preprint arXiv:2403.09637, 2024. + +# APPENDIX + +# A. Applying Transformation and Scaling to 3D Gaussians + +This section outlines how to apply transformations (translation, rotation) and scaling to 3D Gaussians. + +The Gaussian primitive typically possesses three core properties: 1) a center position in three-dimensional space; 2) an orientation that specifies the tilt of its principal axes, commonly represented as a quaternion; 3) a scale indicating its width or narrowness. Additionally, Gaussian primitives can be enhanced with Spherical Harmonics (SH) to capture complex, direction-dependent color features. + +When applying a transformation to the Gaussian primitive, the following steps should be taken: 1) update the center position by scaling, rotating, and then adding the translation offset; 2) update the orientation by combining the existing rotation with the new rotation; 3) adjust the scale by multiplying by the scaling factor; 4) rotate the Spherical Harmonics coefficients by using the Wigner D matrices. + +# B. Details of Demonstration Augmentation Process + +We expand on the details of the demonstration augmentation process in this section. An illustration of augmented demonstrations is provided in Fig. 12. + +# 1) Object pose + +As mentioned in Sec. IV-B1, we transform the end-effector poses at key frames equivariantly according to the transformation that is applied to the target object. However, considering the symmetry of the gripper, we perform post-processing on the transformed end-effector pose. 
+ +Suppose the rotation of the transformed end-effector pose can be expressed as $(r_x, r_y, r_z)$ in the format of XYZ Euler angles. We replace $r_z$ with $r_z'$ , which can be calculated as: + +$$ +r _ {z} ^ {\prime} = \left\{ \begin{array}{l l} r _ {z} & - \frac {\pi}{2} \leqslant r _ {z} \leqslant \frac {\pi}{2} \\ r _ {z} + \pi & r _ {z} < - \frac {\pi}{2} \\ r _ {z} - \pi & r _ {z} > \frac {\pi}{2}. \end{array} \right. +$$ + +The resulting Euler angles $(r_x, r_y, r_z')$ form the final rotation of the end-effector, which prevents the end-effector from performing redundant rotation along its $z$ -axis. + +# 2) Camera view + +As aforementioned in Sec. V-D3, we enumerate the hyperparameters of camera view augmentations and their range of randomization in Table III. Suppose the camera view in the expert demonstration has target point $O_{c}^{\mathrm{expert}} = (x_{c}^{0},y_{c}^{0},z_{c}^{0})$ and corresponding spherical coordinates $(r^0,\theta^0,\varphi^0)$ . Thereby, the target point $O_{c} = (x_{c},y_{c},z_{c})$ and corresponding spherical coordinates $(r,\theta ,\varphi)$ are sampled from uniform distributions, ranging between $(x_c^0\pm \Delta x_c,y_c^0\pm \Delta y_c,z_c^0\pm \Delta z_c,r^0\pm \Delta r,\theta^0\pm \Delta \theta ,\varphi^0\pm \Delta \varphi)$ . + +# 3) Lighting condition + +We present the hyperparameters of lighting condition augmentation in this section. First, we normalize the RGB values of each pixel with minimum value 0 and maximum value 1. + +TABLE III: Camera view augmentation hyperparameters and their range of randomization. + +
HyperparameterValue
Δxc0.1(m)
Δyc0.1(m)
Δzc0.1(m)
Δr0.2(m)
Δθπ/6
Δφπ/6
+ +Then, we stipulate that the hyperparameters are sampled from the following distributions: + +$$ +\begin{array}{l} \left(\Delta_ {r}, \Delta_ {g}, \Delta_ {b}\right) \sim \mathcal {N} (\mathbf {0}, 0. 1 ^ {2} \mathbf {I}), \\ s _ {r}, s _ {g}, s _ {b} \sim \text {U n i f o r m} (0. 3, 1. 8), \\ o _ {r}, o _ {g}, o _ {b} \sim \text {U n i f o r m} (- 0. 3, 0. 3). \\ \end{array} +$$ + +# C. Policy Architecture + +As illustrated in Fig. 13, the policy processes two types of inputs: images and robot states. We use different encoders to tokenize each modality accordingly. For image inputs, the images are first passed through a ResNet-18 vision encoder to generate visual embeddings. We employ a linear layer to extract compact visual features. For the robot state, we encode it into state tokens using a multi-layer perceptron (MLP). + +The multi-modal encoder in our model is based on a GPT-2 style transformer architecture. Before feeding the sequential image and state tokens into the transformer, we append readout tokens [ACT] to the end. These readout tokens attend to embeddings from different modalities, serving as action latents used for action prediction. + +Encoded by the multi-modal encoder, the action latents generated by the [ACT] tokens are fed into the readout decoders to predict actions. The action decoder utilizes an MLP to transform the action latent into the action vector. We predict a chunk of 10 future actions. Compared to single-step action prediction, predicting multiple steps provides temporal action consistency and robustness to idle actions [11]. + +# D. Training Details + +During training, the input at each timestep consists of two images captured from two eye-on-base cameras, along with the robot state. The robot state includes both the arm state and the gripper state. The gripper state is binary, indicating whether the gripper is open or closed. 
For the Franka FR3 robot, the arm state is 7-dimensional, while for the UR5e robot, it is 6-dimensional. + +The policy operates with a history length of 1, and the size of the action chunk is set to 10. During inference, we utilize temporal ensemble techniques to compute a weighted average of the multi-step actions. + +![](images/3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg) +Fig. 12: Illustration of augmented demonstrations. Type of generalization from the top row to the bottom row: object pose, lighting condition, scene appearance, object type, camera view, and embodiment type. + +The policy is trained using a single NVIDIA RTX 4090 GPU, with a batch size of 256 and a learning rate of 1e-4. Depending on the number of demonstrations, the policy is trained for varying numbers of epochs. The hyperparameters used during training are detailed in Table IV. + +# E. Illustration of Real-World Experiment Settings + +We illustrate the experiment settings on lighting condition generalization in Fig. 14. The flashing light alternates between red and blue light at a frequency of $4\mathrm{Hz}$ . Every lighting condition takes up 6 trials in a single experiment. + +Besides, we present the real-world settings on appearance generalization in Fig. 15. Each scenario accounts for 5 trials in a single experiment. + +![](images/769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg) +Fig. 13: Policy architecture. + +TABLE IV: Policy training hyperparameters. + +
Batch Size256
Learning Rate1e-4
Training Epochs1400 (100 demonstrations)
1000 (200 demonstrations)
800 (400 demonstrations)
700 (800 demonstrations)
500 (1800 demonstrations)
300 (3200 demonstrations)
200 (6400 demonstrations)
Image Size128*128
OptimizerAdamW
History Length1
Action Chunk Length10
+ +![](images/59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg) +(a) Flashing light (Red) + +![](images/16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg) +(b) Flashing light (Blue) + +![](images/329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg) +(c) Dark light + +![](images/13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg) +(d) Bright light + +![](images/d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg) +(e) Green light + +![](images/c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg) +(f) Yellow light +Fig. 14: Illustration of real-world experiment on lighting generalization. + +![](images/2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg) +(a) + +![](images/d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg) +(b) + +![](images/7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg) +(c) + +![](images/713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg) +(d) + +![](images/ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg) +(e) + +![](images/b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg) +(f) +Fig. 15: Illustration of real-world experiment on appearance generalization. 
\ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13175/images/056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg b/data/2025/2504_13xxx/2504.13175/images/056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ec8ff37d3a0f8297356c2806f158d009de7b3e8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:414fb83f0838d94163c8962330539dc64cd6dcbce8cc5f299de29882ad5a7dd9 +size 15920 diff --git a/data/2025/2504_13xxx/2504.13175/images/07c3f6229256043d17b3a2f319c0a1d3515f59cb4893de684e02b0fed26d18e1.jpg b/data/2025/2504_13xxx/2504.13175/images/07c3f6229256043d17b3a2f319c0a1d3515f59cb4893de684e02b0fed26d18e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36c6cf43a25ebcb92e8ebe5bcf6087de54d5eeb4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/07c3f6229256043d17b3a2f319c0a1d3515f59cb4893de684e02b0fed26d18e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fbd06f9a01e8e8fa2f0a3097ef457b19a89117f8b915f2a73156cb7b9b34451 +size 6033 diff --git a/data/2025/2504_13xxx/2504.13175/images/0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg b/data/2025/2504_13xxx/2504.13175/images/0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg new file mode 100644 index 0000000000000000000000000000000000000000..141fe2b9298da6a6ec55001b113c44b3d5197be7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4df9cb72029a65fd984f9ee27c18571b71da0d07975dfae6d3ab3ce88de1dbc9 +size 8890 diff --git 
a/data/2025/2504_13xxx/2504.13175/images/0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg b/data/2025/2504_13xxx/2504.13175/images/0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39a7468f93eb5765adf7f793dbd9128563225e29 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca01e41bf5eea84dca14642eb02762e3b08e4794b8ca75bfb71a7608bd8522b6 +size 2231 diff --git a/data/2025/2504_13xxx/2504.13175/images/13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg b/data/2025/2504_13xxx/2504.13175/images/13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4411e5f72a51b05cb9b304f0913380905b69fb4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a7a276dfd3e2771dfc6d5f5435c3b8a235c204cf126c4e7c86f34664e70c775 +size 24612 diff --git a/data/2025/2504_13xxx/2504.13175/images/16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg b/data/2025/2504_13xxx/2504.13175/images/16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..465cb8457fdff3e81da17570e6da619fa99c29a9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b407e4c3189f1d18c35fcca426c3f33e398270c83683cb5340d3336a54b6790 +size 19482 diff --git a/data/2025/2504_13xxx/2504.13175/images/1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg 
b/data/2025/2504_13xxx/2504.13175/images/1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c05c67706616830b07dd10cc7d86c3a97915e6b6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd8a539d4e947d777ddf302a06c4fa44aacbdcacae16f704917d203fb8bd54d1 +size 13230 diff --git a/data/2025/2504_13xxx/2504.13175/images/204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg b/data/2025/2504_13xxx/2504.13175/images/204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6020cdc840788bd45643138f0e19961f9a17dfb2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d1318c015bccbe913651f47c9f013f2ed4ee78dbe4f6bb57a913e7a979ecd4a +size 9795 diff --git a/data/2025/2504_13xxx/2504.13175/images/252ddea6586c69789ae6820fda4aa0db728af581d55c74106f6d42b8189207cf.jpg b/data/2025/2504_13xxx/2504.13175/images/252ddea6586c69789ae6820fda4aa0db728af581d55c74106f6d42b8189207cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..550393ead9a18caad9d3c1411816fdbd54bc60d0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/252ddea6586c69789ae6820fda4aa0db728af581d55c74106f6d42b8189207cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ccdf6a23d225db50eab504a1372d77dcc8c6a3bd4a9de89fddcf81330b2d2da +size 10785 diff --git a/data/2025/2504_13xxx/2504.13175/images/2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg b/data/2025/2504_13xxx/2504.13175/images/2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..efce2db1eba06b907cdfc1aac9ab8df2c7562d81 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d99f4f2f060f7fe534ba169888db511877476297a45d909399d5c7478eeda84 +size 35170 diff --git a/data/2025/2504_13xxx/2504.13175/images/2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg b/data/2025/2504_13xxx/2504.13175/images/2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fd65e528e523674e6dd8278442b200dc90a4d6e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89e93afb3688a80aae22eb0b0eaff0148f115b1e601228957566f7c44c77d9f3 +size 33830 diff --git a/data/2025/2504_13xxx/2504.13175/images/329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg b/data/2025/2504_13xxx/2504.13175/images/329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34e6b9f8f8318316553ce28262c41121f18e23e5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cfb0ce2465b3f4ccf6bd70ea0c77f8d8ebd0b0be8e80377221260911f4220b7 +size 18149 diff --git a/data/2025/2504_13xxx/2504.13175/images/32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg b/data/2025/2504_13xxx/2504.13175/images/32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71097ba7197d757986634cd62946f556e730bb57 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13175/images/32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07cc54891569daaa92daec34d14619c8db7a07c0b40dab1828f779acc2e94b44 +size 3830 diff --git a/data/2025/2504_13xxx/2504.13175/images/35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg b/data/2025/2504_13xxx/2504.13175/images/35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ec05aa635fbe623ed69a67ba07f218194907b30 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c3d54dd1a46f94b6e7e21dcfd4acd383cadda6db328d3a4aec338be6033fa64 +size 2212 diff --git a/data/2025/2504_13xxx/2504.13175/images/3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg b/data/2025/2504_13xxx/2504.13175/images/3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5e3288ff9b10cd84329410a88b87392bad30f2d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be12fd093a45b2360cbd32eee0f1f42998405c4ae9d2aee79c4f08b46aa8a882 +size 225556 diff --git a/data/2025/2504_13xxx/2504.13175/images/47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg b/data/2025/2504_13xxx/2504.13175/images/47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7d78972d779879c804c086f303c89f543076a98 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1516cb3c9e064626a6a2e113044328852e4f0104526ab5372937bb4c0963bce5 +size 54177 diff --git a/data/2025/2504_13xxx/2504.13175/images/53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg b/data/2025/2504_13xxx/2504.13175/images/53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77c3e7b5f447918c22b9198304284f3bc6c63a24 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3823d1aad2ff570e6d29b2304332e2fcd5e3f99fb9dc8f01eda1337dc55339a +size 15973 diff --git a/data/2025/2504_13xxx/2504.13175/images/54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg b/data/2025/2504_13xxx/2504.13175/images/54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78d8d2a73aae6ccc873dbdb238716f9d640e2af2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c97f20ac8698e955c1628e1201410495af4a54db7f940264fec35cb48791d173 +size 8203 diff --git a/data/2025/2504_13xxx/2504.13175/images/5635c35fe428983dd5a8ea497bcc0a98f170f7860c660c5eed3f8e71338f328a.jpg b/data/2025/2504_13xxx/2504.13175/images/5635c35fe428983dd5a8ea497bcc0a98f170f7860c660c5eed3f8e71338f328a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76114398ec57e4f06897f4e7ef7c65422160d6ca --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/5635c35fe428983dd5a8ea497bcc0a98f170f7860c660c5eed3f8e71338f328a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05d621356a1da13449ffeb5c4d8f65c1a014e4497d2b57d4fdcabf58a3d938bf +size 4644 diff --git 
a/data/2025/2504_13xxx/2504.13175/images/58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg b/data/2025/2504_13xxx/2504.13175/images/58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5cea4c71720893c4b41c199a2cf82daa5d223f36 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45c3eb9ef8f7e64cad5bcb8e8c85a648b34c6fa4436f6ca36eb65de895007538 +size 49556 diff --git a/data/2025/2504_13xxx/2504.13175/images/59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg b/data/2025/2504_13xxx/2504.13175/images/59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59cc9cf8b700b3230338b3e5710f0ae051da1adb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d2fbe4f424b69aaa5308e74c9e771855a857dba6dd59d13ea4ed12914004079 +size 17201 diff --git a/data/2025/2504_13xxx/2504.13175/images/593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg b/data/2025/2504_13xxx/2504.13175/images/593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba5f921487a740f550bceaabea1e5667aedf91d1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c098e76796c115ee2f3064c916fd22c8ee2b6d55cdec6cd34d0716b9f946a40 +size 31310 diff --git a/data/2025/2504_13xxx/2504.13175/images/5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg 
b/data/2025/2504_13xxx/2504.13175/images/5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d499bb1ee3a964682deaf2ee7ba07419b569397a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3975e4f5f987b604d6d1dbf9f7f49580f71386c1a959f9c8fc28ae578e26146e +size 11095 diff --git a/data/2025/2504_13xxx/2504.13175/images/5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg b/data/2025/2504_13xxx/2504.13175/images/5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25c4603b3682cb83228b31d8fa8184af51fabac0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56e008b21ed2659c03e8bef50dad6a343e8464e932053bffcbdf9eee0df203fa +size 100917 diff --git a/data/2025/2504_13xxx/2504.13175/images/60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg b/data/2025/2504_13xxx/2504.13175/images/60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b05a6ea4d1f61cdc8d57193278caea0f6b238bc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b975628cc0e03ee4dc31fb95914d953c011c1e8b52f5192ea2a6ed23ab66620c +size 15651 diff --git a/data/2025/2504_13xxx/2504.13175/images/634185e845433b270dbf4c6ed6419b70d4472463ac896032f1924fce80c1f39d.jpg b/data/2025/2504_13xxx/2504.13175/images/634185e845433b270dbf4c6ed6419b70d4472463ac896032f1924fce80c1f39d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4513d5cba2b05d25014592f5234beea72e59b807 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/634185e845433b270dbf4c6ed6419b70d4472463ac896032f1924fce80c1f39d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78e74a5697807ea42a975a86f28fbde38b2ee3ad17563cfa0aa8c1a31da5708a +size 53628 diff --git a/data/2025/2504_13xxx/2504.13175/images/651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg b/data/2025/2504_13xxx/2504.13175/images/651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ed928e9d80c2deb0660b8a40d8abbd7f458f040 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c45d0c85e20ecb3791f27f2ce386920d569a1d58f04ee8f2e9eabd74a23d666 +size 4916 diff --git a/data/2025/2504_13xxx/2504.13175/images/6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg b/data/2025/2504_13xxx/2504.13175/images/6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe3d7f2efd6dc6e003bd60d2f3b796763ecfef04 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a2ea1e1387b5bd581f307e20c86b56fc6d86e0f199aaf1d9290afbdb86cab69 +size 9063 diff --git a/data/2025/2504_13xxx/2504.13175/images/6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg b/data/2025/2504_13xxx/2504.13175/images/6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ce28014615ca35246a71734658613de99587e13 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13175/images/6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a9aa2f2bd5afb983a324afbdb2042f8fb541c1ec4622acdf62dcfcec89924fb +size 2135 diff --git a/data/2025/2504_13xxx/2504.13175/images/713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg b/data/2025/2504_13xxx/2504.13175/images/713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efafcd764f046c2cb07081d9a1a577ab51b4efe1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97810abdb9b5341db1d125d8f271578d1fa3395a35275243ac2cbd0860bf9380 +size 41008 diff --git a/data/2025/2504_13xxx/2504.13175/images/7487552d7951c122ba5686051d88b19406f8a33cc142bc67f1dc01e59f1ec80c.jpg b/data/2025/2504_13xxx/2504.13175/images/7487552d7951c122ba5686051d88b19406f8a33cc142bc67f1dc01e59f1ec80c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84ac43a1a8c4ff6a65436286448438434f2a326e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/7487552d7951c122ba5686051d88b19406f8a33cc142bc67f1dc01e59f1ec80c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0cf5572f20c4a073ffc864b5ca7149764dfaf9d8c3a1c8dd81090a350ee35a8 +size 5428 diff --git a/data/2025/2504_13xxx/2504.13175/images/769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg b/data/2025/2504_13xxx/2504.13175/images/769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f963a59530cdeab5e7b159e8a6ecf0067e038057 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5447c2263722e3129e11f52e78ff9a9437ebc9a70d559d453b6592ce7b2648f0 +size 26414 diff --git a/data/2025/2504_13xxx/2504.13175/images/79e5bb260cccc21139c56b6b96836f36c03754a598fc315e4f626c94f71eb460.jpg b/data/2025/2504_13xxx/2504.13175/images/79e5bb260cccc21139c56b6b96836f36c03754a598fc315e4f626c94f71eb460.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9258bb4bdc3db4b1f5b499301ca9df530ae99d34 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/79e5bb260cccc21139c56b6b96836f36c03754a598fc315e4f626c94f71eb460.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e569a7e79fbb74c191c4128451b30a565f145e4196bb3da66b69444ac424785 +size 3947 diff --git a/data/2025/2504_13xxx/2504.13175/images/7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg b/data/2025/2504_13xxx/2504.13175/images/7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75686fbfa886821d1fa19723513608109d0ee298 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d846dcfd3d0a75ee72575dc66624a4100dbfb2742d071d3293de201836d6beae +size 17069 diff --git a/data/2025/2504_13xxx/2504.13175/images/7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg b/data/2025/2504_13xxx/2504.13175/images/7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2419d89f285fd4bf2018200f2b520e02a66bc853 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64aeaa76b419106a6d2d66413d9c093cdb05514197e961a61ce5aaf6a5807fe3 +size 34434 diff --git 
a/data/2025/2504_13xxx/2504.13175/images/81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg b/data/2025/2504_13xxx/2504.13175/images/81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..225ecf7120620bf3260bbf2ca0759fe3244fc57a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61d63781929f2741b59ac35b218f43c0f2aaeed3c22414556bef9e64335048b2 +size 6343 diff --git a/data/2025/2504_13xxx/2504.13175/images/8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg b/data/2025/2504_13xxx/2504.13175/images/8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e1675d59114758a41a38e2a5c7357a3c123e7a72 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2882a35bc2c8d2c806200d5ddc3bf9c2b7e0efec74c0c4908066263bc6797c33 +size 15728 diff --git a/data/2025/2504_13xxx/2504.13175/images/921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg b/data/2025/2504_13xxx/2504.13175/images/921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf2c0f02a1092cd82341b5c9a8c55948e3943b81 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ae2bd8701ef5b0ac99180b3d70a67457ae7ba0ef6efceb165f14908ac732a65 +size 7522 diff --git a/data/2025/2504_13xxx/2504.13175/images/9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg 
b/data/2025/2504_13xxx/2504.13175/images/9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99fb41de9c46ed830e33df40319dd7add6ebae12 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13e0d29fd14747e513c200d957d5807bcac25ab2f8119b033a24aa45bcdb1958 +size 48091 diff --git a/data/2025/2504_13xxx/2504.13175/images/a41aa0ae08cfe0f40d88521de28cdd9b71e1cb3a141d22dbcaafc61c97e8a93c.jpg b/data/2025/2504_13xxx/2504.13175/images/a41aa0ae08cfe0f40d88521de28cdd9b71e1cb3a141d22dbcaafc61c97e8a93c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8508f3c3f49773e0567f6347457d896979c6355c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/a41aa0ae08cfe0f40d88521de28cdd9b71e1cb3a141d22dbcaafc61c97e8a93c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:331a4d05eb860cfed913877c5308ec9beba052c6734adf63d8628a7cf73f87bd +size 13730 diff --git a/data/2025/2504_13xxx/2504.13175/images/ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg b/data/2025/2504_13xxx/2504.13175/images/ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5342eb4eae58c3458a902fdbac94462c10253b20 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63e0cc48cf4e9224c53d724569610b386f50fea202ae84ebd4654dcefa6c3246 +size 4818 diff --git a/data/2025/2504_13xxx/2504.13175/images/b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg b/data/2025/2504_13xxx/2504.13175/images/b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5fd427e39324c03a9749957a18397b0eb4a28787 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a6dd56eb5c2935c690bed3eb7599eb7237f37bd8c7f8a0fc3649faaf595b13b +size 40937 diff --git a/data/2025/2504_13xxx/2504.13175/images/b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg b/data/2025/2504_13xxx/2504.13175/images/b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0b7f3edf1b3bf56299f1ff659f2257cb0a3ce05 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bc559d63612c1ba954acb13b85cbb54a8f1f8f279e51beee570b7c17b10baa2 +size 37228 diff --git a/data/2025/2504_13xxx/2504.13175/images/b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg b/data/2025/2504_13xxx/2504.13175/images/b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d043e0be84f2f37a7f3424518f5019388182553 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e4b6d1dee960d8a43fdbd9aff87e7f9b2140497062b5f39dbeaac2cd4ef7f03 +size 28921 diff --git a/data/2025/2504_13xxx/2504.13175/images/b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg b/data/2025/2504_13xxx/2504.13175/images/b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c083c96ce46a2e5b597e96f65f39b2688be0278f --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13175/images/b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6580ca0632be5598eddf806904f2d4529a079fe77c5d4ff6363256ec61773562 +size 14953 diff --git a/data/2025/2504_13xxx/2504.13175/images/b769051aa8353358bbea6381920c97be9cab33e0f3548ab34a755084f2d2e49a.jpg b/data/2025/2504_13xxx/2504.13175/images/b769051aa8353358bbea6381920c97be9cab33e0f3548ab34a755084f2d2e49a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d90e1dcdcbc4a1c126d736ff68bab9ff80dc6f97 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b769051aa8353358bbea6381920c97be9cab33e0f3548ab34a755084f2d2e49a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f06fa9625a4b153359f479fd9329b40addcc96929f1e7985a84e4c7243d0ca9 +size 33245 diff --git a/data/2025/2504_13xxx/2504.13175/images/b7a8ceac4b8ee514a489212603ea549598f4d1fc5abfd59ccd58c611623633b3.jpg b/data/2025/2504_13xxx/2504.13175/images/b7a8ceac4b8ee514a489212603ea549598f4d1fc5abfd59ccd58c611623633b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99fa7d396a5f206380611a42ed9e9a2c88b695d8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b7a8ceac4b8ee514a489212603ea549598f4d1fc5abfd59ccd58c611623633b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28ad96e37208df0a5d54ab6c0d714eab8dfa025da8474b88119af456696a1c6f +size 2893 diff --git a/data/2025/2504_13xxx/2504.13175/images/b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg b/data/2025/2504_13xxx/2504.13175/images/b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8db6b444da6dc9fd8924afb71f34a3127e5bd1c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3f4b413c316e6f513ac05d784d3fb2b587f8cb9c29964297917d5bb6a842fc3b +size 13854 diff --git a/data/2025/2504_13xxx/2504.13175/images/bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg b/data/2025/2504_13xxx/2504.13175/images/bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d7e4cef82aadd12d02ab07971072efab9e97320 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da4d3b2032ce718e368aeaacdcdb5597d873790c0f4f0c4b3447f7bf945f9b01 +size 8141 diff --git a/data/2025/2504_13xxx/2504.13175/images/c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg b/data/2025/2504_13xxx/2504.13175/images/c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41c2c7c38d757e352a5730ef49fca8133c06c7ec --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfa3e12baaab2cde46795ee6c362922a3454ecbf04eee0d86de9e39c286d9152 +size 4699 diff --git a/data/2025/2504_13xxx/2504.13175/images/c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg b/data/2025/2504_13xxx/2504.13175/images/c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53f7562d4784d45c678f36e44715e450ec98e6a6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8de5e4638a03d445c5aa5f1dfdb707148e52ba2b494014b7e55bd49a52147c48 +size 15695 diff --git 
a/data/2025/2504_13xxx/2504.13175/images/c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg b/data/2025/2504_13xxx/2504.13175/images/c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..043868f193c1e70f4c2ffae4410786c3991febf1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7be927dc3be7ab311334c207277b9b2ceac3dd4b5b2d1e9c95e6e6d122c019f2 +size 9528 diff --git a/data/2025/2504_13xxx/2504.13175/images/cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg b/data/2025/2504_13xxx/2504.13175/images/cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..728f5f3497d8767c386b8600a88f766e9f8656dc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc43cf4bcf07557312d1155d935861cffbfadf31728e3353d4c29a0d69b729c2 +size 2260 diff --git a/data/2025/2504_13xxx/2504.13175/images/ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg b/data/2025/2504_13xxx/2504.13175/images/ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg new file mode 100644 index 0000000000000000000000000000000000000000..766fd8a54e3121349f3df386f5ffc9c950bc0b52 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a359bd2f7d226386fda0d1fd40c32c6541f6c9c7c68c8c19e6020f5007e282c +size 38795 diff --git a/data/2025/2504_13xxx/2504.13175/images/d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg 
b/data/2025/2504_13xxx/2504.13175/images/d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3829766d5ad956978cc9a258c5e1cb66e44578fc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28af48bd1389ecf527d3cb045d256d0d4a3a0f4ffe148411c791f2ad341045c0 +size 9364 diff --git a/data/2025/2504_13xxx/2504.13175/images/d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg b/data/2025/2504_13xxx/2504.13175/images/d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fcbdbb0d9df770f958b1403496ed48f19bc147ab --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d1dd1e5d9cc376256c06a697d77ef362034bff50721bf53025bfe5a48ac8615 +size 15818 diff --git a/data/2025/2504_13xxx/2504.13175/images/d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg b/data/2025/2504_13xxx/2504.13175/images/d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfafee959de7331b726b7fbb4671beeb15dfadff --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5846990aea391ba1c1e82066b6e073ab2acaa1e7749c701f48b7f0daed63bcb2 +size 33111 diff --git a/data/2025/2504_13xxx/2504.13175/images/da69f2f768f40b9de798fcb498306faa3f03ffbefd1e161872cb6fab13332383.jpg b/data/2025/2504_13xxx/2504.13175/images/da69f2f768f40b9de798fcb498306faa3f03ffbefd1e161872cb6fab13332383.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..34597c29455a43cdb0e5cdc1b1d550b006e2d84c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/da69f2f768f40b9de798fcb498306faa3f03ffbefd1e161872cb6fab13332383.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb1ef98c408fdc8a043cc85b20992fa0008b38f0f84b114fb229f5a0691ef7e1 +size 4149 diff --git a/data/2025/2504_13xxx/2504.13175/images/db06a771fc3c7e7fb1af214d8d12133a1d23f93de90106fc8bc75691ee37af30.jpg b/data/2025/2504_13xxx/2504.13175/images/db06a771fc3c7e7fb1af214d8d12133a1d23f93de90106fc8bc75691ee37af30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..babec75ea865087dc5516405f30d4978a60d8a8f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/db06a771fc3c7e7fb1af214d8d12133a1d23f93de90106fc8bc75691ee37af30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:691c61805b758e408a018f950ab01f35867a4498603ddd16aaf17eec9bfad88f +size 56605 diff --git a/data/2025/2504_13xxx/2504.13175/images/dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg b/data/2025/2504_13xxx/2504.13175/images/dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e17c75f6d717e20f605c5a1cf7337fefe0e30452 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46003bef0128be097617b098f54dbd345fd7542bac87ac9ca90cee908eb3d0b4 +size 44554 diff --git a/data/2025/2504_13xxx/2504.13175/images/e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg b/data/2025/2504_13xxx/2504.13175/images/e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb4ecea7a4126bd3e5624e04f9f14f107d4c9cc0 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13175/images/e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3fee26b2e39d9a023c039f202e2498bb925e91e2eae18d2bf42f5f37b9d208b +size 2432 diff --git a/data/2025/2504_13xxx/2504.13175/images/e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg b/data/2025/2504_13xxx/2504.13175/images/e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6c1cbdf596c44a935902b32eac608728e4606b2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:effeb651be3dba672985d48678d477d1238125aab46371e71c11a9c8f4f31263 +size 5020 diff --git a/data/2025/2504_13xxx/2504.13175/images/ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg b/data/2025/2504_13xxx/2504.13175/images/ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc1d979dcb60626f834418dc2fe6f51273269e81 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eac28c43f3853af9ece8cd965af810c9562aaea0580067a710a0af1538f6a4c +size 9377 diff --git a/data/2025/2504_13xxx/2504.13175/images/f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg b/data/2025/2504_13xxx/2504.13175/images/f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcaee1c0fc475daecdd1717e6f4be317154b69fd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b43ea6da31a196ae3d3d53a681871aee108e876f5018d89f24a9cd06cbe3c4c0 +size 8366 diff --git a/data/2025/2504_13xxx/2504.13175/images/f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg b/data/2025/2504_13xxx/2504.13175/images/f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg new file mode 100644 index 0000000000000000000000000000000000000000..930ec26560bc0671fe435ca8078b8db9c7558996 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9f1baba52bf7e6a3b6c014424bffbf961792df235866f45c5d4b3cfe07370d7 +size 19021 diff --git a/data/2025/2504_13xxx/2504.13175/images/ff405c68cb5d967e4b0eeafc5ac0ad858d489b5593cd217e835c1ae30bf17ff3.jpg b/data/2025/2504_13xxx/2504.13175/images/ff405c68cb5d967e4b0eeafc5ac0ad858d489b5593cd217e835c1ae30bf17ff3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96c7fa0f6d9231d00fc587cfc7e2eaa39a050079 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/ff405c68cb5d967e4b0eeafc5ac0ad858d489b5593cd217e835c1ae30bf17ff3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e5c50af10f9a8bcc7129316fe7cf59b96d922dfb4924cef0f676cbcdd4dbffd +size 4661 diff --git a/data/2025/2504_13xxx/2504.13175/images/ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg b/data/2025/2504_13xxx/2504.13175/images/ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d2d603c69643187caf67a2cc3ac38032eac5719 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/images/ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bb4a15e5c47f3c585a151f42501360f707abe82e55e5b75cdd64b1cca772a7d +size 2101 diff --git 
a/data/2025/2504_13xxx/2504.13175/layout.json b/data/2025/2504_13xxx/2504.13175/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2793e28a92524b33ba39463b1bb7c8886c2b04d9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13175/layout.json @@ -0,0 +1,14565 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 62, + 57, + 548, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 57, + 548, + 113 + ], + "spans": [ + { + "bbox": [ + 62, + 57, + 548, + 113 + ], + "type": "text", + "content": "Novel Demonstration Generation with Gaussian Splitting Enables Robust One-Shot Manipulation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "spans": [ + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "text", + "content": "Sizhe Yang\\*,1,2 Wenye " + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{Yu}^{*,1,3}" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "text", + "content": " Jia Zeng" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "text", + "content": " Jun Lv" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "text", + "content": " Kerui Ren" + }, + { + "bbox": [ + 143, + 125, + 462, + 140 + ], + "type": "inline_equation", + "content": "^{1,3}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "spans": [ + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "text", + "content": "Cewu Lu" + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": 
"inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "text", + "content": " Dahua Lin" + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "text", + "content": " Jiangmiao Pang" + }, + { + "bbox": [ + 197, + 140, + 408, + 153 + ], + "type": "inline_equation", + "content": "^{1,\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "spans": [ + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 153, + 458, + 167 + ], + "type": "text", + "content": "The Chinese University of Hong Kong" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 231, + 167, + 375, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 167, + 375, + 180 + ], + "spans": [ + { + "bbox": [ + 231, + 167, + 375, + 180 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 231, + 167, + 375, + 180 + ], + "type": "text", + "content": "Shanghai Jiao Tong University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 196, + 180, + 414, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 180, + 414, + 193 + ], + "spans": [ + { + "bbox": [ + 196, + 180, + 414, + 193 + ], + "type": "text", + "content": "* Equal contributions † Corresponding author" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 188, + 194, + 415, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 194, + 415, + 207 + ], + "spans": [ + { + "bbox": [ + 
188, + 194, + 415, + 207 + ], + "type": "text", + "content": "Project page: https://yangsizhe.github.io/robosplat/" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 50, + 225, + 296, + 303 + ], + "blocks": [ + { + "bbox": [ + 50, + 225, + 296, + 303 + ], + "lines": [ + { + "bbox": [ + 50, + 225, + 296, + 303 + ], + "spans": [ + { + "bbox": [ + 50, + 225, + 296, + 303 + ], + "type": "image", + "image_path": "b41581ae16497916fa7f0a8c905149a917be9de8c31d49423683b8755835e764.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 306, + 213, + 316 + ], + "lines": [ + { + "bbox": [ + 59, + 306, + 213, + 316 + ], + "spans": [ + { + "bbox": [ + 59, + 306, + 213, + 316 + ], + "type": "text", + "content": "Novel Demonstration Generation" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 58, + 321, + 126, + 382 + ], + "blocks": [ + { + "bbox": [ + 58, + 321, + 126, + 382 + ], + "lines": [ + { + "bbox": [ + 58, + 321, + 126, + 382 + ], + "spans": [ + { + "bbox": [ + 58, + 321, + 126, + 382 + ], + "type": "image", + "image_path": "e88c849405e5c7727046c30ea2b953484ed9a565c0b49f67a68419533bbd9545.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 58, + 383, + 126, + 441 + ], + "blocks": [ + { + "bbox": [ + 58, + 383, + 126, + 441 + ], + "lines": [ + { + "bbox": [ + 58, + 383, + 126, + 441 + ], + "spans": [ + { + "bbox": [ + 58, + 383, + 126, + 441 + ], + "type": "image", + "image_path": "ad8a63afe86e54c84c472d26fc0ff1ed8c976194f0e749cdf2deb8a3d08d55a2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 449, + 564, + 497 + ], + "lines": [ + { + "bbox": [ + 45, + 449, + 564, + 497 + ], + "spans": [ + { + "bbox": [ + 45, + 449, + 564, + 497 + ], + "type": "text", + "content": "Fig. 
1: Starting from a single expert demonstration and multi-view images, our method generates diverse and visually realistic data for policy learning, enabling robust performance across six types of generalization in the real world. Compared to previous 2D data augmentation methods, our approach achieves significantly better results across various generalization types. Notably, we achieve this within a unified framework." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 132, + 322, + 200, + 381 + ], + "blocks": [ + { + "bbox": [ + 132, + 322, + 200, + 381 + ], + "lines": [ + { + "bbox": [ + 132, + 322, + 200, + 381 + ], + "spans": [ + { + "bbox": [ + 132, + 322, + 200, + 381 + ], + "type": "image", + "image_path": "32f802e7e30eab9ebb3a512bc0d66bfde93a7cfed378087e78ecc83db282745b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 132, + 382, + 200, + 441 + ], + "blocks": [ + { + "bbox": [ + 132, + 382, + 200, + 441 + ], + "lines": [ + { + "bbox": [ + 132, + 382, + 200, + 441 + ], + "spans": [ + { + "bbox": [ + 132, + 382, + 200, + 441 + ], + "type": "image", + "image_path": "651c8579ed255e83ff43a3e35df049e75c3f7ca851ccbfa6c751c1ccb609b141.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 206, + 321, + 274, + 381 + ], + "blocks": [ + { + "bbox": [ + 206, + 321, + 274, + 381 + ], + "lines": [ + { + "bbox": [ + 206, + 321, + 274, + 381 + ], + "spans": [ + { + "bbox": [ + 206, + 321, + 274, + 381 + ], + "type": "image", + "image_path": "f3eaa222de3279e0d39362bc71e285457b8dcac0a80024bc991bb38326d20e19.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 206, + 382, + 274, + 441 + ], + "blocks": [ + { + "bbox": [ + 206, + 382, + 
274, + 441 + ], + "lines": [ + { + "bbox": [ + 206, + 382, + 274, + 441 + ], + "spans": [ + { + "bbox": [ + 206, + 382, + 274, + 441 + ], + "type": "image", + "image_path": "c5c09f64051cd724f7b3e6312ccfdddac8aea763c0c913815e3c6e8c9607d583.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 298, + 238, + 380, + 300 + ], + "blocks": [ + { + "bbox": [ + 299, + 226, + 364, + 236 + ], + "lines": [ + { + "bbox": [ + 299, + 226, + 364, + 236 + ], + "spans": [ + { + "bbox": [ + 299, + 226, + 364, + 236 + ], + "type": "text", + "content": "Generalization" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 298, + 238, + 380, + 300 + ], + "lines": [ + { + "bbox": [ + 298, + 238, + 380, + 300 + ], + "spans": [ + { + "bbox": [ + 298, + 238, + 380, + 300 + ], + "type": "image", + "image_path": "54159b48155d2bc94fb982430141147332c7daf413cded000d9ad7a9a5c27bfe.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 298, + 300, + 379, + 361 + ], + "blocks": [ + { + "bbox": [ + 298, + 300, + 379, + 361 + ], + "lines": [ + { + "bbox": [ + 298, + 300, + 379, + 361 + ], + "spans": [ + { + "bbox": [ + 298, + 300, + 379, + 361 + ], + "type": "image", + "image_path": "81cb33fba68552587a99e632f725c1fd0106b2bf17cec9b502a1101e7d1927c5.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 298, + 362, + 380, + 424 + ], + "blocks": [ + { + "bbox": [ + 298, + 362, + 380, + 424 + ], + "lines": [ + { + "bbox": [ + 298, + 362, + 380, + 424 + ], + "spans": [ + { + "bbox": [ + 298, + 362, + 380, + 424 + ], + "type": "image", + "image_path": "5d09305c79d9a9ea490929ca7f29f0ac25de87e3b285cb4657c67099cc780c0e.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 
430, + 391, + 437 + ], + "lines": [ + { + "bbox": [ + 305, + 430, + 391, + 437 + ], + "spans": [ + { + "bbox": [ + 305, + 430, + 391, + 437 + ], + "type": "text", + "content": "Training Data Source:" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 383, + 258, + 422, + 289 + ], + "blocks": [ + { + "bbox": [ + 383, + 258, + 422, + 289 + ], + "lines": [ + { + "bbox": [ + 383, + 258, + 422, + 289 + ], + "spans": [ + { + "bbox": [ + 383, + 258, + 422, + 289 + ], + "type": "image", + "image_path": "ffbe554629ab2c7fde2741f4a6ad747dff67168c471d6998a60a66393fb36830.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 383, + 323, + 425, + 352 + ], + "blocks": [ + { + "bbox": [ + 383, + 323, + 425, + 352 + ], + "lines": [ + { + "bbox": [ + 383, + 323, + 425, + 352 + ], + "spans": [ + { + "bbox": [ + 383, + 323, + 425, + 352 + ], + "type": "image", + "image_path": "6c712ee580435498113446ac682efcc00cab680a3b4f15bd27073ade5c0eb34a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 427, + 238, + 510, + 300 + ], + "blocks": [ + { + "bbox": [ + 427, + 238, + 510, + 300 + ], + "lines": [ + { + "bbox": [ + 427, + 238, + 510, + 300 + ], + "spans": [ + { + "bbox": [ + 427, + 238, + 510, + 300 + ], + "type": "image", + "image_path": "6bd1b346120cd70446996e5df9bbe70575020c94491592e0f4020894b7c819bf.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 429, + 300, + 510, + 361 + ], + "blocks": [ + { + "bbox": [ + 429, + 300, + 510, + 361 + ], + "lines": [ + { + "bbox": [ + 429, + 300, + 510, + 361 + ], + "spans": [ + { + "bbox": [ + 429, + 300, + 510, + 361 + ], + "type": "image", + "image_path": 
"bfa84d627f027b5cabf7370abddf61fa5b720604e608b37159e90f00ff8370c3.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 383, + 364, + 425, + 415 + ], + "blocks": [ + { + "bbox": [ + 383, + 364, + 425, + 415 + ], + "lines": [ + { + "bbox": [ + 383, + 364, + 425, + 415 + ], + "spans": [ + { + "bbox": [ + 383, + 364, + 425, + 415 + ], + "type": "image", + "image_path": "e1c1d0890a88776f034814686b64841cb1e31357eba2629bd3995babb7dd23ff.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 427, + 363, + 510, + 423 + ], + "blocks": [ + { + "bbox": [ + 427, + 363, + 510, + 423 + ], + "lines": [ + { + "bbox": [ + 427, + 363, + 510, + 423 + ], + "spans": [ + { + "bbox": [ + 427, + 363, + 510, + 423 + ], + "type": "image", + "image_path": "921706bfd91926b2ddfacffade625fe78c513762e6ee3133e762d51a20542847.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 394, + 426, + 528, + 432 + ], + "lines": [ + { + "bbox": [ + 394, + 426, + 528, + 432 + ], + "spans": [ + { + "bbox": [ + 394, + 426, + 528, + 432 + ], + "type": "text", + "content": "Manually Collected + Previous 2D Augmentation" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 514, + 258, + 556, + 289 + ], + "blocks": [ + { + "bbox": [ + 514, + 258, + 556, + 289 + ], + "lines": [ + { + "bbox": [ + 514, + 258, + 556, + 289 + ], + "spans": [ + { + "bbox": [ + 514, + 258, + 556, + 289 + ], + "type": "image", + "image_path": "35d892719a6309036eb90f1987157059a3f1914422d61cb54e28d76a70dda9bb.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 514, + 323, + 556, + 352 + ], + "blocks": [ + { + "bbox": [ + 514, + 323, + 556, + 352 + ], + "lines": [ + 
{ + "bbox": [ + 514, + 323, + 556, + 352 + ], + "spans": [ + { + "bbox": [ + 514, + 323, + 556, + 352 + ], + "type": "image", + "image_path": "0c6dd535670c9fee89fab0af96d59af261a5c6532651dadedc7cbe5d9a74f3f0.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 514, + 383, + 556, + 415 + ], + "blocks": [ + { + "bbox": [ + 394, + 433, + 514, + 440 + ], + "lines": [ + { + "bbox": [ + 394, + 433, + 514, + 440 + ], + "spans": [ + { + "bbox": [ + 394, + 433, + 514, + 440 + ], + "type": "text", + "content": "Manually Collected Ours (Generated)" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 514, + 383, + 556, + 415 + ], + "lines": [ + { + "bbox": [ + 514, + 383, + 556, + 415 + ], + "spans": [ + { + "bbox": [ + 514, + 383, + 556, + 415 + ], + "type": "image", + "image_path": "cbbee3983b4fa45dcfa7ca8c7ae0abebe680859c6470cb42bd0fe8d571a960d1.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "bbox": [ + 45, + 503, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 503, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 503, + 301, + 723 + ], + "type": "text", + "content": "Abstract—Visuomotor policies learned from teleoperated demonstrations face challenges such as lengthy data collection, high costs, and limited data diversity. Existing approaches address these issues by augmenting image observations in RGB space or employing Real-to-Sim-to-Real pipelines based on physical simulators. However, the former is constrained to 2D data augmentation, while the latter suffers from imprecise physical simulation caused by inaccurate geometric reconstruction. This paper introduces RoboSplat, a novel method that generates diverse, visually realistic demonstrations by directly manipulating 3D Gaussians. 
Specifically, we reconstruct the scene through 3D Gaussian Splatting (3DGS), directly edit the reconstructed scene, and augment data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types. Comprehensive real-world experiments demonstrate that RoboSplat significantly enhances the generalization of visuomotor policies under diverse disturbances. Notably, while policies trained on hundreds of real-world demonstrations with additional" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "spans": [ + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "text", + "content": "2D data augmentation achieve an average success rate of " + }, + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "inline_equation", + "content": "57.2\\%" + }, + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "text", + "content": ", RoboSplat attains " + }, + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "inline_equation", + "content": "87.8\\%" + }, + { + "bbox": [ + 308, + 503, + 564, + 534 + ], + "type": "text", + "content": " in one-shot settings across six types of generalization in the real world." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 395, + 543, + 477, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 543, + 477, + 553 + ], + "spans": [ + { + "bbox": [ + 395, + 543, + 477, + 553 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 558, + 563, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 558, + 563, + 701 + ], + "spans": [ + { + "bbox": [ + 307, + 558, + 563, + 701 + ], + "type": "text", + "content": "Imitation learning for visuomotor policies has emerged as a promising paradigm in robot manipulation. However, policies learned through imitation often display limited robustness in deployment scenarios that differ substantially from expert demonstrations, primarily due to insufficient coverage of visual domains in the training data. Increasing the volume and diversity of real-world data is an effective strategy for enhancing robustness [12]; however, acquiring human-collected demonstrations is prohibitively time-consuming and labor-intensive. Consequently, substantial efforts have been devoted to generating diverse expert data without engaging with real-world environments [68, 69, 49, 8, 10, 67, 9, 35, 50, 59]." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 308, + 702, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 702, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 702, + 564, + 727 + ], + "type": "text", + "content": "Simulated environments offer a low-cost platform for data synthesis [49, 69]. 
However, the Sim-to-Real gap presents" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.13175v1 [cs.RO] 17 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 56, + 299, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 56, + 299, + 295 + ], + "spans": [ + { + "bbox": [ + 47, + 56, + 299, + 295 + ], + "type": "text", + "content": "significant challenges that hinder policy performance in real-world scenarios. Although Real-to-Sim-to-Real pipelines can narrow this gap considerably, replicating real-world manipulation scenes in simulation remains complex and labor-intensive. In particular, inaccuracies in geometric reconstructions often lead to imprecise physical simulations. Moreover, existing Real-to-Sim-to-Real approaches primarily generate data within monotonously reconstructed scenes, resulting in policies that are tailored only to those specific environments. Another line of work sheds light on augmenting image observations for better visual generalization. By editing different semantic parts of the image, these approaches generate novel scene configurations, in terms of background appearances [68, 9, 67, 10], embodiment types [8], object types [67], and camera views [50]. While these image augmentation methods are convenient, their limited consideration of 3D spatial information results in spatially inaccurate data generation. For more effective data augmentation, explicit 3D representations that retain accurate spatial information and are realistically renderable are required." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 295, + 299, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 295, + 299, + 415 + ], + "spans": [ + { + "bbox": [ + 47, + 295, + 299, + 415 + ], + "type": "text", + "content": "Recently, 3D Gaussian Splatting (3DGS) [25] has become a burgeoning approach to superior reconstruction and rendering. Thanks to its explicit representation of the scene, 3DGS enables interpretable editing of the reconstructed scene, which paves the way for generating novel manipulation configurations. Furthermore, as a 3D representation of the scene, 3DGS retains spatial information from the real world and allows for consistent rendering from multiple perspectives, which makes it the real-world counterpart of a simulator's graphics engine for generating novel demonstrations." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 415, + 299, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 415, + 299, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 415, + 299, + 642 + ], + "type": "text", + "content": "Based on that, we propose RoboSplat, a novel and efficacious approach to demonstration generation with Gaussian Splitting. Empowered by 3DGS, we achieve a high-fidelity reconstruction of the manipulation scene. In order to align the reconstructed scene with real-world counterparts, we devise a novel frame alignment pipeline leveraging differentiable rendering of Gaussian Splitting. 3D Gaussians of different scene components are segmented using off-the-shelf segmentation models and the robot United Robotics Description Format (URDF). Remarkably, as illustrated in Fig. 1, a single collected expert trajectory enables us to generate novel demonstrations across a wide range of visual domains. 
To be specific, RoboSplat augments data across six types of generalization with five techniques: 3D Gaussian replacement for varying object types, scene appearance, and robot embodiments; equivariant transformations for different object poses; visual attribute editing for various lighting conditions; novel view synthesis for new camera perspectives; and 3D content generation for diverse object types." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 643, + 299, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 299, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 299, + 727 + ], + "type": "text", + "content": "Compared to previous Real-to-Sim-to-Real and image augmentation approaches, RoboSplat achieves more diverse and spatially accurate data generation. Extensive real-world experiments demonstrate that RoboSplat significantly enhances the robustness of visuomotor policies against multiple disturbances across tasks involving pick and place, tool use, functional motion, articulated object manipulation, and long" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "spans": [ + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "text", + "content": "horizon skills. Specifically, compared to policies trained on hundreds of real-world demonstrations that are further enriched with 2D data augmentation, our method increases the average success rate from " + }, + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "inline_equation", + "content": "57.2\\%" + }, + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "inline_equation", + "content": "87.8\\%" + }, + { + "bbox": [ + 310, + 57, + 563, + 103 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 395, + 117, + 479, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 117, + 479, + 127 + ], + "spans": [ + { + "bbox": [ + 395, + 117, + 479, + 127 + ], + "type": "text", + "content": "II. RELATED WORK" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 136, + 506, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 136, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 310, + 136, + 506, + 148 + ], + "type": "text", + "content": "A. Generalizable Policy in Robot Manipulation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 153, + 563, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 153, + 563, + 344 + ], + "spans": [ + { + "bbox": [ + 310, + 153, + 563, + 344 + ], + "type": "text", + "content": "Recent advancements in manipulation have significantly enhanced generalization. Some studies design the policy architecture to endow it with equivariant properties, which is helpful to generalizing to different object poses [60, 61, 43, 13]. One-shot imitation learning approaches like [54, 48, 6, 53, 70] enable the policy to handle various object poses given only one demonstration. Furthermore, some other work focuses on generalizing the policy to different camera views [69, 46, 63], scene appearance [30, 51], and embodiments [12]. Some studies exploit the power of Large Language Models (LLMs) and Vision Language Models (VLMs) to endow robots with generalization abilities [23, 7, 39, 14]. Instead of adopting generalizable policy architecture, auxiliary learning objectives and powerful foundation models, our work is concentrated on generating high-quality, diverse, and realistic data to instill generalization abilities to the learned policy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 357, + 487, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 357, + 487, + 368 + ], + "spans": [ + { + "bbox": [ + 310, + 357, + 487, + 368 + ], + "type": "text", + "content": "B. Data Augmentation for Policy Learning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 374, + 563, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 374, + 563, + 647 + ], + "spans": [ + { + "bbox": [ + 310, + 374, + 563, + 647 + ], + "type": "text", + "content": "Given limited training data, data augmentation emerges as a way to improve the robustness of the policy. Previous work adopts image augmentation techniques to improve the resistance of visuomotor policies to observation noises [29, 28, 36, 37, 15, 19, 20]. However, these methods are mainly evaluated in simulated environments. To deploy learned policies in real-world setting, some previous work focuses on augmenting the appearance of the scene by incorporating image-inpainting models [67, 10, 9, 35]. Moreover, Tian et al. [50] generate augmented task demonstrations from different camera views and aim to learn a view-invariant policy. Ameperosa et al. [3]. Chen et al. [8] further devise a cross-embediment pipeline by inpainting different robots to image observations. Nonetheless, these studies mainly augment task demonstrations on 2D images, which lack spatial information. Hence, only limited augmentation can be achieved, and the augmented demonstrations might be unrealistic compared to those generated directly from 3D representations. Our work reconstructs the scene with 3D Gaussian Splatting and edits the 3D representation for data augmentation, enabling our policy to achieve comprehensive generalization across object poses, object types, camera views, lighting conditions, scene appearance, and various embodiments.." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 661, + 452, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 661, + 452, + 672 + ], + "spans": [ + { + "bbox": [ + 310, + 661, + 452, + 672 + ], + "type": "text", + "content": "C. Gaussian Splitting in Robotics" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 678, + 563, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 678, + 563, + 727 + ], + "spans": [ + { + "bbox": [ + 310, + 678, + 563, + 727 + ], + "type": "text", + "content": "3D Gaussian Splatting (3DGS) [25] serves as an explicit radiance field representation for real-time rendering of 3D scenes. Previous work leverages 3DGS to select proper grasp poses [24, 71]. Furthermore, Lu et al. [34] exploit 3DGS to" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 54, + 427, + 200 + ], + "blocks": [ + { + "bbox": [ + 55, + 54, + 427, + 200 + ], + "lines": [ + { + "bbox": [ + 55, + 54, + 427, + 200 + ], + "spans": [ + { + "bbox": [ + 55, + 54, + 427, + 200 + ], + "type": "image", + "image_path": "47ce93682b0b588d7c62b851e04f002ea8def19b6f3b84c40cf9f6e071ca61ba.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 201, + 427, + 320 + ], + "blocks": [ + { + "bbox": [ + 58, + 201, + 427, + 320 + ], + "lines": [ + { + "bbox": [ + 58, + 201, + 427, + 320 + ], + "spans": [ + { + "bbox": [ + 58, + 201, + 427, + 320 + ], + "type": "image", + "image_path": "9e2a04949ec35b9429c9369bf9c1e653d07dfe421af7984acd97fbe9fab0f4b9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 325, + 565, + 373 + ], + "lines": [ + { + "bbox": [ + 44, + 325, + 565, + 373 + ], + "spans": [ + { + "bbox": [ + 44, + 325, + 565, + 373 + ], + 
"type": "text", + "content": "Fig. 2: Method overview. We start from a single manually collected demonstration and multi-view images that capture the whole scene. The former provides task-related keyframes, while the latter helps scene reconstruction. After aligning the reconstructed frame with the real-world frame and segmenting different scene components, we carry out autonomous editing of the scene in pursuit of six types of augmentation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 430, + 58, + 553, + 320 + ], + "blocks": [ + { + "bbox": [ + 430, + 58, + 553, + 320 + ], + "lines": [ + { + "bbox": [ + 430, + 58, + 553, + 320 + ], + "spans": [ + { + "bbox": [ + 430, + 58, + 553, + 320 + ], + "type": "image", + "image_path": "58b7becf4a69a1455733dc9abbcd8d4f00caa882618621bd9e10cdabb7818fb2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 388, + 300, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 388, + 300, + 568 + ], + "spans": [ + { + "bbox": [ + 45, + 388, + 300, + 568 + ], + "type": "text", + "content": "construct dynamics of the scene for multi-task robot manipulation. In order to predict the consequence of robots' interactions with the environment, Shorinwa et al. [47] leverage 3D semantic masking and infilling to visualize the motions of the objects that result from the interactions. Another line of work adopts the Real-to-Sim-to-Real pipeline, and utilizes 3DGS to reconstruct the real-world scene [31, 40, 56, 52]. However, importing reconstructed real-world objects to simulation is a strenuous process, and physical interactions tend to suffer from large sim-to-real gaps due to the flawed geometric reconstruction and lack of physical information in 3D reconstruction. Some recent work on 3DGS is centered around editing and relighting of the scene [65, 32, 17]. 
Our method enables autonomous editing of the reconstructed scene to generate diverse demonstrations with various configurations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 128, + 577, + 218, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 577, + 218, + 588 + ], + "spans": [ + { + "bbox": [ + 128, + 577, + 218, + 588 + ], + "type": "text", + "content": "III. PRELIMINARIES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "spans": [ + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": "3D Gaussian Splatting (3DGS) [25] utilizes multi-view images for high-fidelity scene reconstruction. The scene is represented by a set of Gaussians " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "\\{g_i\\}_{i=1}^N" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": ", where each Gaussian " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "g_i" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": " consists of a position vector " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "\\mu_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": ", a rotation matrix " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "R_i \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": ", a scaling matrix " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "S_i = \\text{diag}(s)(s \\in \\mathbb{R}^3)" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": ", an opacity factor " + }, + { + 
"bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "\\alpha_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": ", and spherical harmonic coefficients " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": " that encapsulate the view-dependent color appearance of the Gaussian. Given the scaling matrix and rotation matrix, the covariance matrix " + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "inline_equation", + "content": "\\Sigma_i" + }, + { + "bbox": [ + 45, + 594, + 300, + 701 + ], + "type": "text", + "content": " is calculated as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 712, + 212, + 727 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 712, + 212, + 727 + ], + "spans": [ + { + "bbox": [ + 134, + 712, + 212, + 727 + ], + "type": "interline_equation", + "content": "\\Sigma_ {i} = R _ {i} S _ {i} S _ {i} ^ {\\top} R _ {i} ^ {\\top}.", + "image_path": "b7a8ceac4b8ee514a489212603ea549598f4d1fc5abfd59ccd58c611623633b3.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 388, + 564, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 564, + 436 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 564, + 436 + ], + "type": "text", + "content": "To derive the color " + }, + { + "bbox": [ + 308, + 388, + 564, + 436 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 308, + 388, + 564, + 436 + ], + "type": "text", + "content": " of a particular pixel during rendering procedure, 3DGS exploits a typical neural point-based approach, similar to Kopanas et al. 
[27], where the final color value is calculated as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 377, + 451, + 493, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 451, + 493, + 483 + ], + "spans": [ + { + "bbox": [ + 377, + 451, + 493, + 483 + ], + "type": "interline_equation", + "content": "C = \\sum_ {i = 1} ^ {N} c _ {i} o _ {i} \\prod_ {j = 1} ^ {j = i - 1} (1 - o _ {j}),", + "image_path": "ff405c68cb5d967e4b0eeafc5ac0ad858d489b5593cd217e835c1ae30bf17ff3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 378, + 485, + 494, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 485, + 494, + 508 + ], + "spans": [ + { + "bbox": [ + 378, + 485, + 494, + 508 + ], + "type": "interline_equation", + "content": "o _ {i} = \\alpha_ {i} \\cdot \\exp \\left(\\frac {1}{2} \\delta_ {i} ^ {\\intercal} \\Sigma_ {i, 2 D} ^ {- 1} \\delta_ {i}\\right),", + "image_path": "da69f2f768f40b9de798fcb498306faa3f03ffbefd1e161872cb6fab13332383.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "spans": [ + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": " is the number of Gaussians that overlap with the pixel. 
Besides, " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": " denotes the opacity of the " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": "-th Gaussian. " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "\\delta_{i} \\in \\mathbb{R}^{2}" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": " denotes the offset between the current pixel and the center of the " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": "-th Gaussian projected to 2D image. " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "\\Sigma_{i,2D} \\in \\mathbb{R}^{2 \\times 2}" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": " stands for the covariance matrix of the " + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 308, + 511, + 564, + 583 + ], + "type": "text", + "content": "-th Gaussian projected to 2D image." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 391, + 590, + 481, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 590, + 481, + 601 + ], + "spans": [ + { + "bbox": [ + 391, + 590, + 481, + 601 + ], + "type": "text", + "content": "IV. 
METHODOLOGY" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 606, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 606, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 307, + 606, + 564, + 727 + ], + "type": "text", + "content": "To generate high-fidelity and diverse data from a single expert trajectory, we present RoboSplat, a novel demonstration generation approach based on 3DGS. An overview of our method is shown in Fig. 2. In this section, we describe RoboSplat in detail. We begin with the process of reconstruction and preprocessing in Sec. IV-A, which includes object and scene reconstruction, frame alignment with differentiable rendering, and novel pose generation for the robot and objects. With all the Gaussian models ready, we generate novel demonstrations and perform data augmentation in terms of object" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 57, + 301, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 57, + 301, + 117 + ], + "spans": [ + { + "bbox": [ + 45, + 57, + 301, + 117 + ], + "type": "text", + "content": "poses, object types, camera views, scene appearance, lighting conditions, and embodiments, as described in Sec. IV-B. Finally, a visuomotor policy is trained on the augmented demonstrations and directly deployed on real robots, as detailed in Sec. IV-C." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 125, + 203, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 125, + 203, + 137 + ], + "spans": [ + { + "bbox": [ + 45, + 125, + 203, + 137 + ], + "type": "text", + "content": "A. 
Reconstruction and Preprocessing" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "text", + "content": "In pursuit of a high-fidelity reconstruction of the scene, we first capture a set of RGB images whose corresponding viewpoints should be as various as possible. During this process, the scene remains static and the robot is fixed at its default joint configuration, which we refer to as " + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "inline_equation", + "content": "q_{\\mathrm{default}}" + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "text", + "content": ". With the images ready, we utilize COLMAP [45, 44] to obtain a sparse scene reconstruction and an estimation of the camera pose corresponding to each image. To further enhance the reconstruction precision, we gain an depth estimation for each image with Depth Anything [62]. The images, camera poses, and depth prior serve as inputs to 3DGS [25], which returns 3D Gaussians representing the entire scene " + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{scene}}" + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "text", + "content": ", which contains 3D Gaussians corresponding to the robot, dubbed " + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 140, + 301, + 297 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "spans": [ + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "text", + "content": "However, the reconstructed 3D Gaussians of the robot are represented in an arbitrary frame " + }, + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{scene}}" + }, + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "text", + "content": ", and hence we need to align it with the real-world coordinate frame " + }, + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 45, + 297, + 301, + 344 + ], + "type": "text", + "content": " to facilitate automated editing." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "spans": [ + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": "The robot URDF gives us access to the robot base frame " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{URDF}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": ". 
The real-world robot frame " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{URDF}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": " are all aligned with each other. Hence, the actual problem turns into the frame alignment from " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{scene}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{URDF}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": ". We denote the transformation matrix as " + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{URDF, scene}}" + }, + { + "bbox": [ + 45, + 345, + 301, + 523 + ], + "type": "text", + "content": ". While point cloud registration approaches, such as Iterative Closest Point (ICP) [5], serve as a common solution to it, we find that there is still major misalignment between the two frames aligned with point cloud registration, as illustrated in Fig. 3. The reason lies in the fact that point cloud registration is based on point coordinates, whereas 3D Gaussians have a scale attribute, which causes a mismatch between point coordinates and the appearance. 
Therefore, we exploit the differentiable rendering of 3DGS to do further fine-grained alignment, as depicted in Fig. 4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "spans": [ + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " is the initial transformation matrix obtained through ICP. We first apply " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{T}}_{\\mathrm{URDF, scene}}^{0}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " leading to a partially aligned robot Gaussian " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{G}}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": ". 
The aim of further alignment is to derive another transformation matrix " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{T}}_{\\mathrm{rel}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": ", such that applying " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{T}}_{\\mathrm{rel}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{G}}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " gives a better alignment to the pose of the robot defined in URDF. For this sake, we select " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " canonical camera views to capture the segmentation masks " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{I}_i^{\\mathrm{URDF}}\\}_{i = 1}^N" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{I}_i^{\\mathrm{Gaussian}}\\}_{i = 1}^N" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " (the pixel value is 1 if it belongs to the robot; otherwise, it is 0) with the robot URDF and " + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{G}}_{\\mathrm{robot}}" + }, + { + "bbox": [ + 45, + 524, + 301, + 670 + ], + "type": "text", + "content": " respectively. 
The pixel-wise differences between the images from the same canonical views are averaged to form the objective function of alignment:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 98, + 677, + 249, + 710 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 677, + 249, + 710 + ], + "spans": [ + { + "bbox": [ + 98, + 677, + 249, + 710 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {a l i g n}} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\mathcal {I} _ {i} ^ {\\text {U R D F}} - \\mathcal {I} _ {i} ^ {\\text {G a u s s i a n}}\\right) ^ {2}.", + "image_path": "7487552d7951c122ba5686051d88b19406f8a33cc142bc67f1dc01e59f1ec80c.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 714, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 714, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 56, + 714, + 301, + 727 + ], + "type": "text", + "content": "Due to the differentiability of Gaussian Splitting, we can" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 345, + 51, + 533, + 152 + ], + "blocks": [ + { + "bbox": [ + 345, + 51, + 533, + 152 + ], + "lines": [ + { + "bbox": [ + 345, + 51, + 533, + 152 + ], + "spans": [ + { + "bbox": [ + 345, + 51, + 533, + 152 + ], + "type": "image", + "image_path": "1a73a53de5cf7d5969a3255354730536448b391b83ca1ad3bb975821256acbe8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 156, + 566, + 242 + ], + "lines": [ + { + "bbox": [ + 307, + 156, + 566, + 242 + ], + "spans": [ + { + "bbox": [ + 307, + 156, + 566, + 242 + ], + "type": "text", + "content": "Fig. 3: Comparison of frame alignment results between ICP and fine-grained optimization with differentiable rendering. The semi-transparent orange overlay represents the ground truth rendered with URDF from the same camera view. 
The left shows the results of ICP, which have larger errors, while the right shows the results after further fine-grained optimization using differentiable rendering." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 253, + 563, + 413 + ], + "blocks": [ + { + "bbox": [ + 310, + 253, + 563, + 413 + ], + "lines": [ + { + "bbox": [ + 310, + 253, + 563, + 413 + ], + "spans": [ + { + "bbox": [ + 310, + 253, + 563, + 413 + ], + "type": "image", + "image_path": "b14d38a8e57726af6c838d49b428567bd4e1a21daa955cdaf7fc7f49c8370334.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 417, + 564, + 489 + ], + "lines": [ + { + "bbox": [ + 307, + 417, + 564, + 489 + ], + "spans": [ + { + "bbox": [ + 307, + 417, + 564, + 489 + ], + "type": "text", + "content": "Fig. 4: Illustration of frame alignment with differentiable rendering. The loss is calculated between the mask rendered using Gaussian Splatting and the mask rendered with URDF. Subsequently, backpropagation and gradient descent are used to optimize the translation, rotation, and scale, which are then applied to the 3D Gaussians." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "spans": [ + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": "rewrite the objective function as " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{align}}(\\hat{T}_{\\mathrm{rel}})" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": " and optimize " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\hat{T}_{\\mathrm{rel}}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": " through gradient descent. The optimized " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\hat{T}_{\\mathrm{rel}}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": " is composed with " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\hat{T}_{\\mathrm{URDF, scene}}^{0}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": ", the result of which is applied to " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{scene}}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": " to form the scene reconstruction in " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": ". 
We refer to the aligned 3D Gaussians as " + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{scene}}^{*}" + }, + { + "bbox": [ + 308, + 496, + 563, + 557 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "spans": [ + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": "In order to decompose the scene into different parts, we first leverage Grounded-SAM [41] to perform task-related object segmentation. Then, the masked images are used to reconstruct 3D Gaussians for the objects. The 3D Gaussians corresponding to each link of the robot are segmented using the point cloud of each link in " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{URDF}}" + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": ", which can be obtained with the robot's URDF and the renderer. Specifically, if the position of a 3D Gaussian is within a threshold distance from the point cloud of a link, the 3D Gaussian is assigned to that link. If a 3D Gaussian does not belong to any object or any link of the robot, it is classified as background. We suppose that the robot has " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": " links and there are totally " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": " objects in the scene. 
The reconstructed robot links, objects, and background are denoted as " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}^* = \\{\\mathcal{G}_{\\mathrm{robot},i}^*\\}_{i=1}^l" + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{obj}}^* = \\{\\mathcal{G}_{\\mathrm{obj},j}^*\\}_{j=1}^k" + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 558, + 564, + 728 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{bg}}^*" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 57, + 99, + 68 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 57, + 99, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 57, + 99, + 68 + ], + "type": "text", + "content": "respectively." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 68, + 301, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 68, + 301, + 128 + ], + "spans": [ + { + "bbox": [ + 45, + 68, + 301, + 128 + ], + "type": "text", + "content": "Similar to our frame alignment strategy, we utilize differentiable rendering to estimate the deployed camera poses in order to narrow the gap between the generated data and the deployment environment. 
The camera extrinsics are optimized through gradient descent, with the optimization objective:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 99, + 140, + 249, + 154 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 140, + 249, + 154 + ], + "spans": [ + { + "bbox": [ + 99, + 140, + 249, + 154 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {c a m e r a}} = S S I M \\left(\\mathcal {I} _ {\\text {E x p e r t}}, \\mathcal {I} _ {\\text {G a u s s i a n}}\\right) ^ {2},", + "image_path": "5635c35fe428983dd5a8ea497bcc0a98f170f7860c660c5eed3f8e71338f328a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "spans": [ + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{Expert}}" + }, + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "text", + "content": " denotes the image obtained from the collected expert demonstration, " + }, + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{Gaussian}}" + }, + { + "bbox": [ + 45, + 159, + 300, + 219 + ], + "type": "text", + "content": " represents the rendered image with reconstructed 3D Gaussians, and SSIM refers to Structural Similarity, which measures the perceptual similarity between two images." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": "Nonetheless, before moving on to novel demonstration generation, we need to figure out how to generate 3D Gaussians for the robot under novel joint configurations. To achieve that, we leverage the link-wise Gaussians " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{G}_{\\mathrm{robot},i}^{*}\\}_{i = 1}^{l}" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": " and the default joint configuration " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "q_{\\mathrm{default}}" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": ". For each link " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "1 \\leqslant i \\leqslant l" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": ", we access its relative pose to robot base frame under arbitrary joint configuration " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": " through forward kinematics, denoted as " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{fk}}^i(q)" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": ". 
Hence, by transforming each link " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{\\mathrm{fk}}^i(q)\\mathcal{T}_{\\mathrm{fk}}^i(q_{\\mathrm{default}})^{-1}" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": ", we derive the corresponding 3D Gaussians under configuration " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": ". The entire 3D Gaussians are thereby derived by composing Gaussians of all " + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 45, + 220, + 301, + 376 + ], + "type": "text", + "content": " links. As for the manipulated objects, we apply transformations in a similar manner. The way 3D Gaussians are transformed is detailed in Appendix A." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 385, + 198, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 385, + 198, + 396 + ], + "spans": [ + { + "bbox": [ + 45, + 385, + 198, + 396 + ], + "type": "text", + "content": "B. 
Novel Demonstration Generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "text", + "content": "Utilizing 3D Gaussians in " + }, + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "text", + "content": ", we implement our demonstration augmentation process, which systematically enhances the expert demonstration " + }, + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{expert}}" + }, + { + "bbox": [ + 45, + 401, + 300, + 460 + ], + "type": "text", + "content": " across six aspects: object poses, object types, camera views, embodiment types, scene appearance, and lighting conditions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 462, + 122, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 462, + 122, + 473 + ], + "spans": [ + { + "bbox": [ + 57, + 462, + 122, + 473 + ], + "type": "text", + "content": "1) Object Pose" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 474, + 300, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 474, + 300, + 617 + ], + "spans": [ + { + "bbox": [ + 45, + 474, + 300, + 617 + ], + "type": "text", + "content": "To perform object pose augmentation, we first extract keyframes from the expert demonstration using a heuristic approach. Whenever the gripper action toggles or joint velocities approach zero, we consider the current time step as a keyframe and record the end-effector pose with respect to robot base frame. After that, we apply rigid transformations to the target objects that are involved in the expert demonstration. 
The end-effector poses at keyframes are transformed equivariantly according to the target object. Eventually, we generate trajectories between consecutive keyframe poses with motion planning, the combination of which makes a complete augmented demonstration with novel object poses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 618, + 122, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 122, + 630 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 122, + 630 + ], + "type": "text", + "content": "2) Object Type" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 630, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 630, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 630, + 301, + 727 + ], + "type": "text", + "content": "The object types can be augmented with 3D Content Generation. We first prompt GPT-4 [2] to generate approximately 50 names of objects that can be grasped. Then, we use these object names as prompts to generate corresponding 3D Gaussians with a 3D content generation model [57]. We utilize an off-the-shelf grasping algorithm [16] to generate grasp poses with respect to the object frame. As we generate different object poses for augmentation, we obtain the corresponding" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 57, + 564, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 57, + 564, + 104 + ], + "spans": [ + { + "bbox": [ + 307, + 57, + 564, + 104 + ], + "type": "text", + "content": "end-effector poses by composing object pose and the grasp pose relative to the object, which turn into the keyframe poses in new demonstrations. The entire augmented trajectory is generated in the same manner as IV-B1." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 109, + 391, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 109, + 391, + 120 + ], + "spans": [ + { + "bbox": [ + 318, + 109, + 391, + 120 + ], + "type": "text", + "content": "3) Camera View" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "spans": [ + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": "One merit of 3DGS lies in its ability to perform novel view synthesis. Thereby, we are able to choose different camera poses from " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{expert}}" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": " and obtain novel-view demonstrations. Although we can render novel-view observations from arbitrary camera pose, we need to ensure that the augmented camera view does not deviate so much from the expert that it loses sight of the manipulation scene. Hence, we first designate a target point " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "O_{c} = (x_{c},y_{c},z_{c})" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": ", towards which the camera should face during the entire episode. 
We then define a coordinate frame " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_c" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": ", whose origin is " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "O_{c}" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": " and orientation is the same as " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\mathrm{real}}" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": ". The position of camera is represented by spherical coordinates " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "(r,\\theta ,\\varphi)" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_c" + }, + { + "bbox": [ + 307, + 123, + 564, + 328 + ], + "type": "text", + "content": ". Thus, by limiting the target point within the manipulation scene and randomizing the spherical coordinates, we are able to generate camera poses that produce meaningful observations yet possess diversity. The hyperparameters of randomization for the target point and the spherical coordinates are detailed in Appendix B." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 332, + 409, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 332, + 409, + 344 + ], + "spans": [ + { + "bbox": [ + 318, + 332, + 409, + 344 + ], + "type": "text", + "content": "4) Embodiment Type" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "spans": [ + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "text", + "content": "To generalize the expert demonstration to different types of robots, we replace " + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}^*" + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "text", + "content": " with the 3D Gaussians of another embodiment, dubbed " + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}" + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "text", + "content": ", which is attained from the corresponding URDF file or real-world reconstruction. The keyframe end-effector poses are reused because they are embodiment-agnostic action representations. Hence, through motion planning, we can easily derive the end-effector poses and joint positions of the new embodiment for all time steps in augmented demonstrations. The 3D Gaussians of the new embodiment under novel joint configurations is obtained from " + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{robot}}^{\\mathrm{new}}" + }, + { + "bbox": [ + 307, + 347, + 564, + 502 + ], + "type": "text", + "content": " as mentioned in Sec. IV-A. The policy trained on these augmented demonstrations is directly deployed on novel embodiments." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 507, + 410, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 507, + 410, + 519 + ], + "spans": [ + { + "bbox": [ + 318, + 507, + 410, + 519 + ], + "type": "text", + "content": "5) Scene Appearance" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 522, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 522, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 307, + 522, + 564, + 727 + ], + "type": "text", + "content": "Inconsistency between scene appearance accounts for a large visual gap between training and deployment environments. To resolve this issue, we propose to exploit reconstructed diverse 3D scenes and also large-scale image datasets to augment the scene appearance. We adopt COCO [33] as the image dataset, and attach images to the table top and background 3D Gaussian planes that surround the entire manipulation scene. Moreover, we gather datasets for 3D reconstruction [22, 66, 26, 4], and derive corresponding 3D Gaussians by 3DGS training. The resulting 3D Gaussian scenes substitute for " + }, + { + "bbox": [ + 307, + 522, + 564, + 727 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\mathrm{bg}}^*" + }, + { + "bbox": [ + 307, + 522, + 564, + 727 + ], + "type": "text", + "content": ", forming novel scene appearance for data augmentation. The edge of utilizing reconstructed 3D scenes is their consistent and diverse geometry across multiple camera views, which helps produce more realistic demonstrations. Nevertheless, due to the expense of 3DGS training on large-scale reconstruction datasets, we complement them with 2D images for greater appearance diversity." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 57, + 149, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 57, + 149, + 68 + ], + "spans": [ + { + "bbox": [ + 55, + 57, + 149, + 68 + ], + "type": "text", + "content": "6) Lighting Condition" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "spans": [ + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": "Discrepancy in lighting conditions is another barrier to deploying trained policy in unseen scenarios. To compensate for that, we augment the diffuse color of each Gaussian in the reconstructed scene through random scaling, offset, and noise. Concretely, for a Gaussian with original diffuse color " + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "inline_equation", + "content": "(r,g,b)" + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": ", the augmented diffuse color values can be expressed as " + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "inline_equation", + "content": "(s_r r + o_r + \\Delta_r, s_g g + o_g + \\Delta_g, s_b b + o_b + \\Delta_b)" + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "inline_equation", + "content": "(s_r, s_g, s_b)" + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": " stand for scaling factors, " + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "inline_equation", + "content": "(o_r, o_g, o_b)" + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": " stand for offsets, and " + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "inline_equation", + 
"content": "(\\Delta_r, \\Delta_g, \\Delta_b)" + }, + { + "bbox": [ + 45, + 68, + 301, + 258 + ], + "type": "text", + "content": " stand for random Gaussian noise. The scaling factors and offsets simulate changes in color contrast and scene brightness. Thus, they are shared among all the Gaussians in the scene. On the other hand, the random Gaussian noise is sampled independently for each Gaussian to simulate noise in images captured by cameras. The details of scaling factors, offsets, and Gaussian noise are elaborated in Appendix B." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 259, + 301, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 259, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 45, + 259, + 301, + 284 + ], + "type": "text", + "content": "An illustration of augmented demonstrations with six types of generalizations can be found in Appendix B." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 289, + 126, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 289, + 126, + 301 + ], + "spans": [ + { + "bbox": [ + 45, + 289, + 126, + 301 + ], + "type": "text", + "content": "C. Policy Training" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "spans": [ + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": "We employ a modern, widely adopted transformer-based architecture [18, 51, 38, 55] to serve as the policy network, which is detailed in Appendix C. We process RGB images with ResNet-18 [21], and encode joint state using a multilayer perceptron (MLP). The latent of images and robot state is fed into a transformer encoder. 
Finally, an action decoder utilizes an MLP to convert the action latent into the action vector " + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": ". The policy is trained with Behavioural Cloning (BC) in an end-to-end manner, aiming to maximize the likelihood of expert actions in demonstrations. We denote " + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "inline_equation", + "content": "o_k \\triangleq (I_k, q_k)" + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": " as the observation at the " + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": "-th frame of demonstrations " + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 45, + 304, + 300, + 448 + ], + "type": "text", + "content": " as our policy. 
The loss function can then be expressed as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 458, + 244, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 458, + 244, + 472 + ], + "spans": [ + { + "bbox": [ + 102, + 458, + 244, + 472 + ], + "type": "interline_equation", + "content": "\\mathcal {L} ^ {\\mathrm {B C}} = \\mathbb {E} _ {(o _ {k}, a _ {k}) \\sim \\mathcal {D}} \\| a _ {k} - \\pi (o _ {k}) \\| ^ {2}.", + "image_path": "79e5bb260cccc21139c56b6b96836f36c03754a598fc315e4f626c94f71eb460.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 478, + 301, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 478, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 45, + 478, + 301, + 550 + ], + "type": "text", + "content": "Specifically, " + }, + { + "bbox": [ + 45, + 478, + 301, + 550 + ], + "type": "inline_equation", + "content": "I_{k}" + }, + { + "bbox": [ + 45, + 478, + 301, + 550 + ], + "type": "text", + "content": " consists of two images from different eye-on-base cameras. We adopt relative end-effector pose as the action representation, which depicts the relative transformation between two consecutive end-effector poses under robot base frame. Further details of the training process can be found in Appendix D." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 133, + 556, + 212, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 556, + 212, + 567 + ], + "spans": [ + { + "bbox": [ + 133, + 556, + 212, + 567 + ], + "type": "text", + "content": "V. 
EXPERIMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 571, + 300, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 571, + 300, + 618 + ], + "spans": [ + { + "bbox": [ + 45, + 571, + 300, + 618 + ], + "type": "text", + "content": "We conduct comprehensive experiments in the real world to verify the effectiveness of our demonstration generation pipeline. Specifically, we aim to answer: given a single expert demonstration and multi-view images of the scene," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 618, + 301, + 727 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 45, + 618, + 299, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 618, + 299, + 642 + ], + "spans": [ + { + "bbox": [ + 45, + 618, + 299, + 642 + ], + "type": "text", + "content": "1) How efficient is data generation compared to manually collecting data?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 642, + 299, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 642, + 299, + 677 + ], + "spans": [ + { + "bbox": [ + 45, + 642, + 299, + 677 + ], + "type": "text", + "content": "2) How does the policy trained on generated demonstrations perform across various tasks compared to that trained on manually collected data?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 677, + 299, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 677, + 299, + 701 + ], + "spans": [ + { + "bbox": [ + 45, + 677, + 299, + 701 + ], + "type": "text", + "content": "3) How does the policy perform as the generated data scale up?" 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 701, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 701, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 701, + 301, + 727 + ], + "type": "text", + "content": "4) Can generated demonstrations enhance the robustness of the policy when facing various deployment settings, such" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 315, + 58, + 556, + 223 + ], + "blocks": [ + { + "bbox": [ + 315, + 58, + 556, + 223 + ], + "lines": [ + { + "bbox": [ + 315, + 58, + 556, + 223 + ], + "spans": [ + { + "bbox": [ + 315, + 58, + 556, + 223 + ], + "type": "image", + "image_path": "dd23fb58097a52a0676ee630392ebd652e6e0cd011735ee77e91a73c4726d8b8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 227, + 564, + 262 + ], + "lines": [ + { + "bbox": [ + 308, + 227, + 564, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 227, + 564, + 262 + ], + "type": "text", + "content": "Fig. 5: Real-world experiment setup. We employ a Franka Research 3 Robot and two eye-on-base RealSense D435i cameras." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 277, + 563, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 277, + 563, + 301 + ], + "spans": [ + { + "bbox": [ + 308, + 277, + 563, + 301 + ], + "type": "text", + "content": "as changes in object types, camera views, scene appearance, lighting conditions, and embodiment types?" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 313, + 406, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 313, + 406, + 325 + ], + "spans": [ + { + "bbox": [ + 308, + 313, + 406, + 325 + ], + "type": "text", + "content": "A. 
Experimental Setup" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 330, + 563, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 330, + 563, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 330, + 563, + 426 + ], + "type": "text", + "content": "The real-world experiment setup is presented in Fig. 5. Concretely, we collect the expert demonstration on Franka Research 3 (FR3) Robot. Two Intel Realsense D435i eye-on-base cameras are mounted on the table top, capturing RGB image observations for the policy. We employ a 3D SpaceMouse to collect teleoperated demonstrations at a frequency of " + }, + { + "bbox": [ + 307, + 330, + 563, + 426 + ], + "type": "inline_equation", + "content": "10\\mathrm{Hz}" + }, + { + "bbox": [ + 307, + 330, + 563, + 426 + ], + "type": "text", + "content": ". Policy inference is carried out on an NVIDIA RTX4090 GPU, with a latency of 0.1s imposed." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 426, + 563, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 563, + 474 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 563, + 474 + ], + "type": "text", + "content": "In order to manifest the generalization ability of our pipeline to different task settings, we select five tasks for evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place, and Sweep." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": "In Pick Object task, the policy picks up a target object which is placed at different poses within a " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}\\times 40\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " workspace. In CloseDrawer task, the policy closes a drawer whose position is constrained to a " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "15\\mathrm{cm}\\times 40\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " workspace, while its rotation about the z-axis is restricted to " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": ". In Pick-Place-Close task, the policy is expected to grasp an object, place it in the drawer, and then close the drawer. The drawer is placed in a " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "5\\mathrm{cm}\\times 5\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " workspace, with a fixed orientation. 
The target object is located in a " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "10\\mathrm{cm}\\times 10\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " workspace, whose rotation falls into range " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "\\left[-\\frac{\\pi}{8},\\frac{\\pi}{8}\\right]" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": ". In Dual Pick-Place task, the policy attempts to pick two target objects in a row and place them in a fixed drawer. Both of the objects are located in " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "10\\mathrm{cm}\\times 10\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " workspaces, with yaw angles between " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "-\\frac{\\pi}{8}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "\\frac{\\pi}{8}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": ". In Sweep task, the robot should first pick up a broom and then sweeps the chocolate beans into a dustpan. The broom is randomly placed within a " + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "inline_equation", + "content": "10\\mathrm{cm}\\times 10\\mathrm{cm}" + }, + { + "bbox": [ + 307, + 475, + 564, + 727 + ], + "type": "text", + "content": " area, and the chocolate beans are randomly placed on the chopping board. Task setups are illustrated in Fig. 6. 
These five tasks require proficiency in executing basic pick-and-place actions, manipulating articulated objects, performing long-horizon tasks, and demonstrating skills involving tool use and" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 53, + 153, + 152 + ], + "blocks": [ + { + "bbox": [ + 51, + 53, + 153, + 152 + ], + "lines": [ + { + "bbox": [ + 51, + 53, + 153, + 152 + ], + "spans": [ + { + "bbox": [ + 51, + 53, + 153, + 152 + ], + "type": "image", + "image_path": "53bdabfc3bcfe1e3547e7ff8ab00b81643d6111bcc3f0286b640241c72bcd876.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 157, + 563, + 180 + ], + "lines": [ + { + "bbox": [ + 45, + 157, + 563, + 180 + ], + "spans": [ + { + "bbox": [ + 45, + 157, + 563, + 180 + ], + "type": "text", + "content": "Fig. 6: Task illustration. We design five manipulation tasks for real-world evaluation: Pick Object, Close Printer, Pick-Place-Close, Dual Pick-Place and Sweep, whose details are elaborated in Sec. V-A." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 153, + 53, + 254, + 152 + ], + "blocks": [ + { + "bbox": [ + 153, + 53, + 254, + 152 + ], + "lines": [ + { + "bbox": [ + 153, + 53, + 254, + 152 + ], + "spans": [ + { + "bbox": [ + 153, + 53, + 254, + 152 + ], + "type": "image", + "image_path": "b9a164729ad40c6dadad6a0d166fe8cdcb32404ecd6fce30f340c478aaf9a819.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 255, + 53, + 356, + 152 + ], + "blocks": [ + { + "bbox": [ + 255, + 53, + 356, + 152 + ], + "lines": [ + { + "bbox": [ + 255, + 53, + 356, + 152 + ], + "spans": [ + { + "bbox": [ + 255, + 53, + 356, + 152 + ], + "type": "image", + "image_path": "8549171b20db883a106ecd8a4fb7e8188d2842e143a98d206dfd88352a0f646f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 357, + 53, + 458, + 152 + ], + "blocks": [ + { + "bbox": [ + 357, + 53, + 458, + 152 + ], + "lines": [ + { + "bbox": [ + 357, + 53, + 458, + 152 + ], + "spans": [ + { + "bbox": [ + 357, + 53, + 458, + 152 + ], + "type": "image", + "image_path": "056fc96eff5a26b8330467dc5ccaab099169d151244129e6a8ca159a68dc2a9f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 459, + 53, + 559, + 152 + ], + "blocks": [ + { + "bbox": [ + 459, + 53, + 559, + 152 + ], + "lines": [ + { + "bbox": [ + 459, + 53, + 559, + 152 + ], + "spans": [ + { + "bbox": [ + 459, + 53, + 559, + 152 + ], + "type": "image", + "image_path": "60af5eb5b5e6d990c8aedc4700c76702cdd364cee0151b662b675636ff1cd24d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 191, + 301, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
45, + 191, + 301, + 214 + ], + "spans": [ + { + "bbox": [ + 45, + 191, + 301, + 214 + ], + "type": "text", + "content": "functional motion. Together, they provide a comprehensive evaluation across various task settings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 215, + 301, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 215, + 301, + 358 + ], + "spans": [ + { + "bbox": [ + 45, + 215, + 301, + 358 + ], + "type": "text", + "content": "We also conduct extensive real-world experiments to prove the effectiveness of our data generation pipeline in terms of different types of generalization. Notably, the evaluation of object pose generalization is incorporated into all experiments, including those focused on the other five types of generalization (object types, camera views, embodiment types, lighting conditions, and scene appearance). This is because object pose generalization is a fundamental requirement for task completion ability. For the other five types of generalization, the details are provided in Sec. V-D. Success rate (SR) is chosen as the evaluation metric in all experiments. Each policy is evaluated with 30 trials for a certain evaluation setting." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 365, + 231, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 365, + 231, + 376 + ], + "spans": [ + { + "bbox": [ + 45, + 365, + 231, + 376 + ], + "type": "text", + "content": "B. Efficiency of Augmenting Demonstrations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 380, + 300, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 380, + 300, + 487 + ], + "spans": [ + { + "bbox": [ + 45, + 380, + 300, + 487 + ], + "type": "text", + "content": "To answer Question 1, we need to justify that our pipeline is economical with both labor and time when generating data. 
The labor-saving property is obvious because demonstrations are generated automatically in our pipeline. We compare the average time consumption of manually collecting a real-world demonstration to that of generating a demonstration through our pipeline. Specifically, we adopt eight processes on an NVIDIA RTX 4090 GPU for paralleled data generation to efficiently utilize computational resources." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 488, + 301, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 488, + 301, + 583 + ], + "spans": [ + { + "bbox": [ + 45, + 488, + 301, + 583 + ], + "type": "text", + "content": "The comparison study is conducted on all five tasks, and the result is shown in Table I. Our data generation pipeline that executed on a single GPU is more than 29 times faster than collecting data in the real world, with an average time consumption of 0.64s across all five tasks. With no human interference, our demonstration generation approach is able to generate visually diverse training data with little time expenditure." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 591, + 289, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 591, + 289, + 603 + ], + "spans": [ + { + "bbox": [ + 45, + 591, + 289, + 603 + ], + "type": "text", + "content": "C. Performance of the Policy Trained on Augmented Data" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 606, + 300, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 606, + 300, + 666 + ], + "spans": [ + { + "bbox": [ + 45, + 606, + 300, + 666 + ], + "type": "text", + "content": "To answer Question 2 and 3, we compare the policies trained on generated demonstrations and manually collected demonstrations in terms of their success rates when facing various object poses. Moreover, we explore the performance of policies as generated data gradually scale up." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "type": "text", + "content": "The main results of the experiment are illustrated in Fig. 7. While policies trained on real-world demonstrations still have an edge over those trained on the same number of generated ones, our method manifests salient improvement in success rate as the generated demonstrations scale up. Concretely," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "text", + "content": "visuomotor policies trained on 800 generated demonstrations achieve comparable performance to those trained on 200 manually collected demonstrations. Moreover, training with 1800 generated demonstrations raises the success rate to an average of " + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "inline_equation", + "content": "94.7\\%" + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "text", + "content": ", significantly surpassing the success rate achieved with 200 manually collected demonstrations. It is also worth mentioning that the policy achieves a " + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "inline_equation", + "content": "96.7\\%" + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "text", + "content": " success rate on Dual Pick-Place task with our generated data, which is nearly " + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 307, + 191, + 564, + 346 + ], + "type": "text", + "content": " higher than the baseline (manually collected). 
These findings testify the effectiveness of our method in generating novel object poses for better generalization of visuomotor policies, and indicate promising scaling property as generated data scales up." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 357, + 547, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 357, + 547, + 369 + ], + "spans": [ + { + "bbox": [ + 309, + 357, + 547, + 369 + ], + "type": "text", + "content": "D. Robustness when Facing Various Deployment Settings" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 373, + 563, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 373, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 308, + 373, + 563, + 456 + ], + "type": "text", + "content": "To answer Question 4, we augment the expert demonstration in five different dimensions: lighting conditions, scene appearance, camera views, object types, and embodiment types. We compare policies trained on real-world data, real-world data augmented using 2D augmentation approaches, and data generated via our pipeline. An illustration of the experiments for different generalization types is shown in Fig. 8." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 319, + 457, + 413, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 457, + 413, + 468 + ], + "spans": [ + { + "bbox": [ + 319, + 457, + 413, + 468 + ], + "type": "text", + "content": "1) Lighting Condition" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 469, + 563, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 469, + 563, + 517 + ], + "spans": [ + { + "bbox": [ + 308, + 469, + 563, + 517 + ], + "type": "text", + "content": "To demonstrate the effectiveness of lighting augmentation in our approach, we adopt five different scenarios for policy deployment, which are shown in Appendix E. 
We compare the performance of four policies that are trained respectively on:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 520, + 564, + 615 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 315, + 520, + 509, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 520, + 509, + 531 + ], + "spans": [ + { + "bbox": [ + 315, + 520, + 509, + 531 + ], + "type": "text", + "content": "1) 200 real-world demonstrations (Collected);" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 532, + 564, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 532, + 564, + 567 + ], + "spans": [ + { + "bbox": [ + 315, + 532, + 564, + 567 + ], + "type": "text", + "content": "2) 1800 generated demonstrations with only object pose augmentation, which are the same as data used in V-C (Ours Pose-Only);" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 567, + 563, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 567, + 563, + 591 + ], + "spans": [ + { + "bbox": [ + 315, + 567, + 563, + 591 + ], + "type": "text", + "content": "3) real-world demonstrations augmented with color jitter (Color Jitter);" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 591, + 563, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 591, + 563, + 615 + ], + "spans": [ + { + "bbox": [ + 315, + 591, + 563, + 615 + ], + "type": "text", + "content": "4) 3200 demonstrations generated by our pipeline with both lighting condition and object pose augmentation (Ours)." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "text", + "content": "As shown in Fig. 
9, policies trained on augmented lighting conditions achieve an average of over " + }, + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "text", + "content": " success rate across Pick Object, Close Driver, and Pick-Place-Close tasks, with an overall improvement over those trained on real-world data without augmentation by " + }, + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 308, + 617, + 564, + 727 + ], + "type": "text", + "content": ". Furthermore, our policies show a significant edge over those trained on generated demonstrations with augmented object poses and real-world demonstrations augmented with color jitter, justifying the validity of lighting augmentation in our pipeline." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 82, + 526, + 133 + ], + "blocks": [ + { + "bbox": [ + 45, + 52, + 564, + 77 + ], + "lines": [ + { + "bbox": [ + 45, + 52, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 45, + 52, + 564, + 77 + ], + "type": "text", + "content": "TABLE I: Comparison of demonstration collection time (s). We calculate the average time cost of data collection of a single demonstration over 100 demonstrations. Our method achieves more than 29 times the speed compared to the baseline." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 77, + 82, + 526, + 133 + ], + "lines": [ + { + "bbox": [ + 77, + 82, + 526, + 133 + ], + "spans": [ + { + "bbox": [ + 77, + 82, + 526, + 133 + ], + "type": "table", + "html": "
Task TypePick ObjectClose PrinterPick-Place-PrintDual Pick-PlaceSweepAverage
Real-world13.210.124.727.020.419.1
Ours0.430.340.861.00.580.64
", + "image_path": "b769051aa8353358bbea6381920c97be9cab33e0f3548ab34a755084f2d2e49a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 73, + 144, + 231, + 234 + ], + "blocks": [ + { + "bbox": [ + 73, + 144, + 231, + 234 + ], + "lines": [ + { + "bbox": [ + 73, + 144, + 231, + 234 + ], + "spans": [ + { + "bbox": [ + 73, + 144, + 231, + 234 + ], + "type": "image", + "image_path": "7c0ee42e2207295b362773c05b96cce7bdf9cb3c5a14fb57c35ec76aca30ed78.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 232, + 144, + 380, + 229 + ], + "blocks": [ + { + "bbox": [ + 232, + 144, + 380, + 229 + ], + "lines": [ + { + "bbox": [ + 232, + 144, + 380, + 229 + ], + "spans": [ + { + "bbox": [ + 232, + 144, + 380, + 229 + ], + "type": "image", + "image_path": "ed7242ed314a3d14a5e43aeeb9a806ab6c3d6c1e23699bf13089e97f9fecf66c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 382, + 144, + 531, + 229 + ], + "blocks": [ + { + "bbox": [ + 382, + 144, + 531, + 229 + ], + "lines": [ + { + "bbox": [ + 382, + 144, + 531, + 229 + ], + "spans": [ + { + "bbox": [ + 382, + 144, + 531, + 229 + ], + "type": "image", + "image_path": "0c0dd19ef5080d4329d728c1edeb0a459f24a88a67d8519ac275762bf8ef0154.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 82, + 235, + 230, + 320 + ], + "blocks": [ + { + "bbox": [ + 82, + 235, + 230, + 320 + ], + "lines": [ + { + "bbox": [ + 82, + 235, + 230, + 320 + ], + "spans": [ + { + "bbox": [ + 82, + 235, + 230, + 320 + ], + "type": "image", + "image_path": "d412f2e90064cb5e4add7b49aa82fb97fd4fd497c8072ee8f2634f614d4c1693.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 327, + 565, 
+ 365 + ], + "lines": [ + { + "bbox": [ + 45, + 327, + 565, + 365 + ], + "spans": [ + { + "bbox": [ + 45, + 327, + 565, + 365 + ], + "type": "text", + "content": "Fig. 7: Main results. Top left: We present the average success rate across five tasks. Our method shows promising scalability as the number of demonstration grows. The other five subfigures: For each task, we evaluate the success rate of policies trained from manually collected data and those generated by our method over 30 trials, using different number of demonstrations." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 232, + 235, + 380, + 320 + ], + "blocks": [ + { + "bbox": [ + 232, + 235, + 380, + 320 + ], + "lines": [ + { + "bbox": [ + 232, + 235, + 380, + 320 + ], + "spans": [ + { + "bbox": [ + 232, + 235, + 380, + 320 + ], + "type": "image", + "image_path": "c99791955c59958e854fdf45de3a0ffb3663991c619063f2566d76acdee2993c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 383, + 235, + 531, + 320 + ], + "blocks": [ + { + "bbox": [ + 383, + 235, + 531, + 320 + ], + "lines": [ + { + "bbox": [ + 383, + 235, + 531, + 320 + ], + "spans": [ + { + "bbox": [ + 383, + 235, + 531, + 320 + ], + "type": "image", + "image_path": "204cc20f13470490e19076be5708bdccc00578f619442b1173ebe8c8a84f6755.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 380, + 148, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 148, + 391 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 148, + 391 + ], + "type": "text", + "content": "2) Scene Appearance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 
45, + 391, + 301, + 581 + ], + "type": "text", + "content": "Similar to the experiment on lighting conditions, we select five different scenarios for evaluation on scene appearance augmentation, which is illustrated in Appendix E. The four policies for comparison are trained in a similar manner as described in Sec. V-D1, with the key difference being that we employ image inpainting methods [68, 9, 67, 10] as more robust and suitable 2D augmentation baselines for appearance generalization. The results are shown in Fig. 9. The policy trained on data generated through our pipeline, incorporating both appearance and object pose augmentations, achieves superior performance compared to all baselines. Notably, it demonstrates over a " + }, + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "type": "text", + "content": " increase in success rates across all three tasks when compared to policies trained on data without appearance augmentation. In particular, our policy achieves " + }, + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 45, + 391, + 301, + 581 + ], + "type": "text", + "content": " success rate on the Pick Object task, showcasing strong robustness against various background appearance." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 582, + 128, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 128, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 128, + 593 + ], + "type": "text", + "content": "3) Camera View" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 594, + 300, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 594, + 300, + 677 + ], + "spans": [ + { + "bbox": [ + 45, + 594, + 300, + 677 + ], + "type": "text", + "content": "We employ two different settings for camera view generalization: novel view and moving view. In novel view experiments, we select 30 poses for each camera, which are different from the training perspective. On the other hand, cameras are kept moving in moving view experiments. Similar to Sec. V-D1 and Sec. V-D2, we compare the performance of four policies that are trained respectively on:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 678, + 300, + 725 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 678, + 247, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 678, + 247, + 689 + ], + "spans": [ + { + "bbox": [ + 53, + 678, + 247, + 689 + ], + "type": "text", + "content": "1) 200 real-world demonstrations (Collected);" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 689, + 299, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 689, + 299, + 713 + ], + "spans": [ + { + "bbox": [ + 52, + 689, + 299, + 713 + ], + "type": "text", + "content": "2) 1800 generated demonstrations with only object pose augmentation (Ours Pose-Only);" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 714, + 300, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 714, + 300, + 725 + ], + "spans": [ + { + "bbox": [ + 52, + 714, + 300, + 725 + ], + "type": "text", + "content": "3) 3200 
demonstrations stemmed from 200 real-world" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 328, + 379, + 564, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 379, + 564, + 415 + ], + "spans": [ + { + "bbox": [ + 328, + 379, + 564, + 415 + ], + "type": "text", + "content": "demonstrations, augmented using VISTA [50], which leverages novel view synthesis models to augment data from different views;" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 415, + 563, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 415, + 563, + 439 + ], + "spans": [ + { + "bbox": [ + 315, + 415, + 563, + 439 + ], + "type": "text", + "content": "4) 3200 generated demonstrations with camera view augmentation (Ours)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "text", + "content": "We present the results in Table II. Our policy is able to perform Pick Object task and Pick-Place-Close task with success rates of over " + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "text", + "content": " respectively, while the policies trained on data without augmentation can barely accomplish the task. Our approach also outperforms VISTA by a large margin. 
Notably, our policy achieves nearly " + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 308, + 441, + 564, + 538 + ], + "type": "text", + "content": " success rate on CloseDrawer task, manifesting strong robustness against novel camera views and moving cameras." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 318, + 540, + 385, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 540, + 385, + 551 + ], + "spans": [ + { + "bbox": [ + 318, + 540, + 385, + 551 + ], + "type": "text", + "content": "4) Object Type" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 552, + 563, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 552, + 563, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 552, + 563, + 588 + ], + "type": "text", + "content": "In order to demonstrate the effectiveness of our method in augmenting object types, we compare the performance of three different policies that are respectively trained on:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 590, + 563, + 687 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 590, + 563, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 590, + 563, + 613 + ], + "spans": [ + { + "bbox": [ + 315, + 590, + 563, + 613 + ], + "type": "text", + "content": "1) 400 real-world demonstrations with 5 real-world objects (Collected);" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 615, + 563, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 615, + 563, + 662 + ], + "spans": [ + { + "bbox": [ + 315, + 615, + 563, + 662 + ], + "type": "text", + "content": "2) 6400 demonstrations stemmed from 200 real-world demonstrations, augmented using ROSIE [67], which utilizes image inpainting models to generate data with unseen objects;" + } + ] + } + ], + "index": 23 + 
}, + { + "bbox": [ + 315, + 662, + 563, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 662, + 563, + 687 + ], + "spans": [ + { + "bbox": [ + 315, + 662, + 563, + 687 + ], + "type": "text", + "content": "3) 6400 demonstrations generated by our pipeline with object type augmentation (Ours)." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "type": "text", + "content": "During deployment, we select five real-word objects that are different from all the objects covered in training process. We report the result in Fig. 10. The policy trained on 50 object" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 54, + 541, + 232 + ], + "blocks": [ + { + "bbox": [ + 63, + 54, + 541, + 232 + ], + "lines": [ + { + "bbox": [ + 63, + 54, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 63, + 54, + 541, + 232 + ], + "type": "image", + "image_path": "5fc498dcd53b37e9bf2a31cb0c5a87fdec8b68b0eeafba95226135e5bb3b20ad.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 239, + 565, + 277 + ], + "lines": [ + { + "bbox": [ + 44, + 239, + 565, + 277 + ], + "spans": [ + { + "bbox": [ + 44, + 239, + 565, + 277 + ], + "type": "text", + "content": "Fig. 8: Illustration of real-world experiments for different generalization types. The data is collected in the original setting. When deploying the trained policy, we modify object poses, lighting conditions, scene appearance, camera views, object types, and embodiments to evaluate the robustness in different scenarios." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 289, + 312, + 470 + ], + "blocks": [ + { + "bbox": [ + 59, + 289, + 312, + 470 + ], + "lines": [ + { + "bbox": [ + 59, + 289, + 312, + 470 + ], + "spans": [ + { + "bbox": [ + 59, + 289, + 312, + 470 + ], + "type": "image", + "image_path": "2e3c1d8bf3f055ec267b19fbbcf4be774fe8fdaaaf18d9c9511fd4775d308de0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 476, + 565, + 515 + ], + "lines": [ + { + "bbox": [ + 44, + 476, + 565, + 515 + ], + "spans": [ + { + "bbox": [ + 44, + 476, + 565, + 515 + ], + "type": "text", + "content": "Fig. 9: Performance when changing lighting conditions and appearance. We report the success rate of different policies under various lighting conditions and appearance. The policies trained with generated demonstrations with corresponding augmentations manifest remarkable advance compared to baseline policies." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 317, + 287, + 547, + 469 + ], + "blocks": [ + { + "bbox": [ + 317, + 287, + 547, + 469 + ], + "lines": [ + { + "bbox": [ + 317, + 287, + 547, + 469 + ], + "spans": [ + { + "bbox": [ + 317, + 287, + 547, + 469 + ], + "type": "image", + "image_path": "593193a036e1192cd5ed93966c35e5f47f82a3d6aec90ed02ce3e52b86db2fbb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 525, + 302, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 525, + 302, + 586 + ], + "spans": [ + { + "bbox": [ + 45, + 525, + 302, + 586 + ], + "type": "text", + "content": "types showcases better adaptability to novel object types, improving the success rate of baseline models by over " + }, + { + "bbox": [ + 45, + 525, + 302, + 586 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 45, + 525, + 302, + 586 + ], + "type": "text", + "content": ". This demonstrates the effectiveness of our data generation pipeline in utilizing off-the-shelf 3D Content Generation models to generalize policy to novel objects." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 55, + 599, + 289, + 693 + ], + "blocks": [ + { + "bbox": [ + 55, + 599, + 289, + 693 + ], + "lines": [ + { + "bbox": [ + 55, + 599, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 599, + 289, + 693 + ], + "type": "image", + "image_path": "b5af8d67c630930530fc45bc66a04dbfea14f7bd87c0b73e0f0ba1c940517e9f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 693, + 301, + 728 + ], + "lines": [ + { + "bbox": [ + 45, + 693, + 301, + 728 + ], + "spans": [ + { + "bbox": [ + 45, + 693, + 301, + 728 + ], + "type": "text", + "content": "Fig. 10: Performance on novel object types. 
The policy trained on data generated by RoboSplat shows a salient edge over baseline policies." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 318, + 525, + 409, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 525, + 409, + 538 + ], + "spans": [ + { + "bbox": [ + 318, + 525, + 409, + 538 + ], + "type": "text", + "content": "5) Embodiment Type" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 542, + 564, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 542, + 564, + 697 + ], + "spans": [ + { + "bbox": [ + 307, + 542, + 564, + 697 + ], + "type": "text", + "content": "Our method supports generating demonstrations across different embodiment types as mentioned in Sec. IV-B4. To prove that, based on one demonstration collected with the Franka Research 3, we generate novel demonstrations for a UR5e robot equipped with a Robotiq 2F-85 gripper and deploy the learned policy directly in the real world. It is worth noting that policies trained on Franka Research 3 robot demonstrations fail to be deployed on UR5e robot due to frequent safety violations. We compare the performance of policies trained on embodiment-augmented demonstrations with those trained on data augmented using RoVi-Aug [8]. RoVi-Aug modifies real-world demonstrations by replacing the appearance of the embodiment through generative models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 701, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 701, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 701, + 564, + 727 + ], + "type": "text", + "content": "We present the performance of policies in Fig. 11. 
Policies trained on data generated using our pipeline achieve a success" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 94, + 565, + 184 + ], + "blocks": [ + { + "bbox": [ + 45, + 53, + 565, + 89 + ], + "lines": [ + { + "bbox": [ + 45, + 53, + 565, + 89 + ], + "spans": [ + { + "bbox": [ + 45, + 53, + 565, + 89 + ], + "type": "text", + "content": "TABLE II: Performance when changing camera view. We compare the success rate of different policies under two circumstances: novel camera view and moving camera view. The policies trained on demonstrations augmented using our approach showcase significant improvement over baseline policies." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 94, + 565, + 184 + ], + "lines": [ + { + "bbox": [ + 50, + 94, + 565, + 184 + ], + "spans": [ + { + "bbox": [ + 50, + 94, + 565, + 184 + ], + "type": "table", + "html": "
Data SourcePick ObjectClose PrinterPick-Place-CloseAverage
Novel ViewMoving ViewNovel ViewMoving ViewNovel ViewMoving View
Collected6.70.016.713.30.00.06.1
Ours Pose-Only0.00.026.730.00.00.09.5
VISTA [50]33.333.356.770.033.316.740.6
Ours90.086.7100.096.753.356.780.6
", + "image_path": "db06a771fc3c7e7fb1af214d8d12133a1d23f93de90106fc8bc75691ee37af30.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 206, + 301, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 206, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 45, + 206, + 301, + 255 + ], + "type": "text", + "content": "rate close to " + }, + { + "bbox": [ + 45, + 206, + 301, + 255 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 45, + 206, + 301, + 255 + ], + "type": "text", + "content": " on an embodiment different from the one used for demonstration collection. This result highlights its superior performance compared to the baseline in cross-embodiment transfer." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 48, + 266, + 299, + 363 + ], + "blocks": [ + { + "bbox": [ + 48, + 266, + 299, + 363 + ], + "lines": [ + { + "bbox": [ + 48, + 266, + 299, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 266, + 299, + 363 + ], + "type": "image", + "image_path": "f44bde63f046c6bacfede4ff3776e2d1fd5281da40a9fa7ceabbe1c7f488d922.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 369, + 301, + 418 + ], + "lines": [ + { + "bbox": [ + 45, + 369, + 301, + 418 + ], + "spans": [ + { + "bbox": [ + 45, + 369, + 301, + 418 + ], + "type": "text", + "content": "Fig. 11: Performance on cross embodiment experiments. We evaluate the learned policy directly on the UR5e robot and achieve a nearly " + }, + { + "bbox": [ + 45, + 369, + 301, + 418 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 45, + 369, + 301, + 418 + ], + "type": "text", + "content": " success rate that surpasses the 2D augmentation methods." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 442, + 212, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 442, + 212, + 453 + ], + "spans": [ + { + "bbox": [ + 134, + 442, + 212, + 453 + ], + "type": "text", + "content": "VI. LIMITATIONS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 459, + 300, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 459, + 300, + 543 + ], + "spans": [ + { + "bbox": [ + 45, + 459, + 300, + 543 + ], + "type": "text", + "content": "Due to the limitations of naive 3D Gaussian Splatting, it is incapable of handling deformable objects. Additionally, the pipeline lacks physical constraints, making it unsuitable for contact-rich and dynamic tasks. However, recent advancements in Gaussian Splatting [58, 1, 64, 42] provide promising opportunities to address these challenges. Future work could apply these techniques to generate data for a wider range of tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 552, + 216, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 552, + 216, + 562 + ], + "spans": [ + { + "bbox": [ + 131, + 552, + 216, + 562 + ], + "type": "text", + "content": "VII. CONCLUSION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 568, + 301, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 568, + 301, + 640 + ], + "spans": [ + { + "bbox": [ + 45, + 568, + 301, + 640 + ], + "type": "text", + "content": "In this work, we introduce RoboSplat, a novel demonstration generation approach that requires only a single collected demonstration and generates diverse and high-quality data for policy learning. Comprehensive real-world experiments show that our approach significantly enhances the robustness of visuomotor policies when encountering various disturbances." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 126, + 650, + 221, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 650, + 221, + 659 + ], + "spans": [ + { + "bbox": [ + 126, + 650, + 221, + 659 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 666, + 301, + 727 + ], + "type": "text", + "content": "We sincerely thank Yang Tian and Xiao Chen for their fruitful discussions. This work is supported by the National Key R&D Program of China (2022ZD0160201), Shanghai Artificial Intelligence Laboratory, and China Postdoctoral Science Foundation (2023M741848)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 407, + 207, + 465, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 207, + 465, + 217 + ], + "spans": [ + { + "bbox": [ + 407, + 207, + 465, + 217 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 224, + 564, + 727 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 315, + 224, + 564, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 224, + 564, + 283 + ], + "spans": [ + { + "bbox": [ + 315, + 224, + 564, + 283 + ], + "type": "text", + "content": "[1] Jad Abou-Chakra, Krishan Rana, Feras Dayoub, and Niko Suenderhauf. Physically embodied gaussian splatt-ting: A visually learnt and physically grounded 3d representation for robotics. In 8th Annual Conference on Robot Learning, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 284, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 284, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 315, + 284, + 564, + 342 + ], + "type": "text", + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 343, + 563, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 343, + 563, + 390 + ], + "spans": [ + { + "bbox": [ + 315, + 343, + 563, + 390 + ], + "type": "text", + "content": "[3] Ezra Ameperosa, Jeremy A Collins, Mrinal Jain, and Animesh Garg. Rocoda: Counterfactual data augmentation for data-efficient robot learning from demonstrations. arXiv preprint arXiv:2411.16959, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 391, + 564, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 391, + 564, + 450 + ], + "spans": [ + { + "bbox": [ + 315, + 391, + 564, + 450 + ], + "type": "text", + "content": "[4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5470–5479, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 451, + 563, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 451, + 563, + 497 + ], + "spans": [ + { + "bbox": [ + 316, + 451, + 563, + 497 + ], + "type": "text", + "content": "[5] Paul J Besl and Neil D McKay. Method for registration of 3-d shapes. In Sensor fusion IV: control paradigms and data structures, volume 1611, pages 586-606. Spie, 1992." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 498, + 563, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 498, + 563, + 569 + ], + "spans": [ + { + "bbox": [ + 316, + 498, + 563, + 569 + ], + "type": "text", + "content": "[6] Ondrej Biza, Skye Thompson, Kishore Reddy Pagidi, Abhinav Kumar, Elise van der Pol, Robin Walters, Thomas Kipf, Jan-Willem van de Meent, Lawson LS Wong, and Robert Platt. One-shot imitation learning via interaction warping. arXiv preprint arXiv:2306.12392, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 571, + 563, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 571, + 563, + 640 + ], + "spans": [ + { + "bbox": [ + 316, + 571, + 563, + 640 + ], + "type": "text", + "content": "[7] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski, Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, et al. Rt-2: Vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 642, + 563, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 642, + 563, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 642, + 563, + 713 + ], + "type": "text", + "content": "[8] Lawrence Yunliang Chen, Chenfeng Xu, Karthik Dharmarajan, Muhammad Zubair Irshad, Richard Cheng, Kurt Keutzer, Masayoshi Tomizuka, Quan Vuong, and Ken Goldberg. Rovi-aug: Robot and viewpoint augmentation for cross-embodiment robot learning. arXiv preprint arXiv:2409.03403, 2024." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 714, + 563, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 714, + 563, + 727 + ], + "spans": [ + { + "bbox": [ + 315, + 714, + 563, + 727 + ], + "type": "text", + "content": "[9] Zoey Chen, Sho Kiami, Abhishek Gupta, and Vikash" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 727 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 68, + 57, + 300, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 57, + 300, + 91 + ], + "spans": [ + { + "bbox": [ + 68, + 57, + 300, + 91 + ], + "type": "text", + "content": "Kumar. Genaug: Retargeting behaviors to unseen situations via generative augmentation. arXiv preprint arXiv:2302.06671, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 92, + 301, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 92, + 301, + 152 + ], + "spans": [ + { + "bbox": [ + 47, + 92, + 301, + 152 + ], + "type": "text", + "content": "[10] Zoey Chen, Zhao Mandi, Homanga Bharadhwaj, Mohit Sharma, Shuran Song, Abhishek Gupta, and Vikash Kumar. Semantically controllable augmentations for generalizable robot learning. The International Journal of Robotics Research, page 02783649241273686, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 152, + 301, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 152, + 301, + 212 + ], + "spans": [ + { + "bbox": [ + 47, + 152, + 301, + 212 + ], + "type": "text", + "content": "[11] Cheng Chi, Zhenjia Xu, Siyuan Feng, Eric Cousineau, Yilun Du, Benjamin Burchfiel, Russ Tedrake, and Shuran Song. Diffusion policy: Visuomotor policy learning via action diffusion. 
The International Journal of Robotics Research, page 02783649241273668, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 212, + 301, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 212, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 47, + 212, + 301, + 272 + ], + "type": "text", + "content": "[12] Cheng Chi, Zhenjia Xu, Chuer Pan, Eric Cousineau, Benjamin Burchfiel, Siyuan Feng, Russ Tedrake, and Shuran Song. Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots. arXiv preprint arXiv:2402.10329, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 272, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 272, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 272, + 301, + 342 + ], + "type": "text", + "content": "[13] Ethan Chun, Yilun Du, Anthony Simeonov, Tomas Lozano-Perez, and Leslie Kaelbling. Local neural descriptor fields: Locally conditioned object representations for manipulation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 1830-1836. IEEE, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 343, + 301, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 343, + 301, + 392 + ], + "spans": [ + { + "bbox": [ + 47, + 343, + 301, + 392 + ], + "type": "text", + "content": "[14] Murtaza Dalal, Min Liu, Walter Talbott, Chen Chen, Deepak Pathak, Jian Zhang, and Ruslan Salakhutdinov. Local policies enable zero-shot long-horizon manipulation. arXiv preprint arXiv:2410.22332, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 392, + 301, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 392, + 301, + 439 + ], + "spans": [ + { + "bbox": [ + 47, + 392, + 301, + 439 + ], + "type": "text", + "content": "[15] Linxi Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, and Anima Anandkumar. 
Secant: Self-expert cloning for zero-shot generalization of visual policies. arXiv preprint arXiv:2106.09678, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 439, + 301, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 439, + 301, + 498 + ], + "spans": [ + { + "bbox": [ + 47, + 439, + 301, + 498 + ], + "type": "text", + "content": "[16] Hao-Shu Fang, Chenxi Wang, Hongjie Fang, Minghao Gou, Jirong Liu, Hengxu Yan, Wenhai Liu, Yichen Xie, and Cewu Lu. Anygrasp: Robust and efficient grasp perception in spatial and temporal domains. IEEE Transactions on Robotics, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 498, + 301, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 301, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 301, + 559 + ], + "type": "text", + "content": "[17] Jian Gao, Chun Gu, Youtian Lin, Zhihao Li, Hao Zhu, Xun Cao, Li Zhang, and Yao Yao. Relightable 3d gaussians: Realistic point cloud relighting with brdf decomposition and ray tracing. In European Conference on Computer Vision, pages 73-89. Springer, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 559, + 301, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 301, + 594 + ], + "type": "text", + "content": "[18] Siddhant Haldar, Zhuoran Peng, and Lerrel Pinto. Baku: An efficient transformer for multi-task policy learning. arXiv preprint arXiv:2406.07539, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 594, + 301, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 301, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 301, + 642 + ], + "type": "text", + "content": "[19] Nicklas Hansen and Xiaolong Wang. Generalization in reinforcement learning by soft data augmentation. 
In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13611-13617. IEEE, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 642, + 301, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 301, + 690 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 301, + 690 + ], + "type": "text", + "content": "[20] Nicklas Hansen, Hao Su, and Xiaolong Wang. Stabilizing deep q-learning with convnets and vision transformers under data augmentation. Advances in neural information processing systems, 34:3680-3693, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 690, + 301, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 690, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 690, + 301, + 727 + ], + "type": "text", + "content": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 311, + 57, + 564, + 727 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 331, + 57, + 523, + 68 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 57, + 523, + 68 + ], + "spans": [ + { + "bbox": [ + 331, + 57, + 523, + 68 + ], + "type": "text", + "content": "and pattern recognition, pages 770-778, 2016." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 311, + 68, + 564, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 68, + 564, + 116 + ], + "spans": [ + { + "bbox": [ + 311, + 68, + 564, + 116 + ], + "type": "text", + "content": "[22] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (ToG), 37(6):1-15, 2018." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 116, + 564, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 116, + 564, + 186 + ], + "spans": [ + { + "bbox": [ + 311, + 116, + 564, + 186 + ], + "type": "text", + "content": "[23] Alex Irpan, Alexander Herzog, Alexander Toshkov Toshev, Andy Zeng, Anthony Brohan, Brian Andrew Ichter, Byron David, Carolina Parada, Chelsea Finn, Clayton Tan, et al. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, number 2022, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 311, + 188, + 564, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 188, + 564, + 224 + ], + "spans": [ + { + "bbox": [ + 311, + 188, + 564, + 224 + ], + "type": "text", + "content": "[24] Mazeyu Ji, Ri-Zhao Qiu, Xueyan Zou, and Xiaolong Wang. Graspsplats: Efficient manipulation with 3d feature splatting. arXiv preprint arXiv:2409.02084, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 311, + 224, + 564, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 224, + 564, + 271 + ], + "spans": [ + { + "bbox": [ + 311, + 224, + 564, + 271 + ], + "type": "text", + "content": "[25] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 311, + 271, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 271, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 311, + 271, + 564, + 319 + ], + "type": "text", + "content": "[26] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 319, + 564, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 319, + 564, + 368 + ], + "spans": [ + { + "bbox": [ + 311, + 319, + 564, + 368 + ], + "type": "text", + "content": "[27] Georgios Kopanas, Thomas Leimkuhler, Gilles Rainer, Clément Jambon, and George Drettakis. Neural point catauastics for novel-view synthesis of reflections. ACM Transactions on Graphics (TOG), 41(6):1-15, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 368, + 564, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 368, + 564, + 415 + ], + "spans": [ + { + "bbox": [ + 311, + 368, + 564, + 415 + ], + "type": "text", + "content": "[28] Ilya Kostrikov, Denis Yarats, and Rob Fergus. Image augmentation is all you need: Regularizing deep reinforcement learning from pixels. arXiv preprint arXiv:2004.13649, 2020." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 415, + 564, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 415, + 564, + 463 + ], + "spans": [ + { + "bbox": [ + 311, + 415, + 564, + 463 + ], + "type": "text", + "content": "[29] Misha Laskin, Kimin Lee, Adam Stooke, Lerrel Pinto, Pieter Abbeel, and Aravind Srinivas. Reinforcement learning with augmented data. Advances in neural information processing systems, 33:19884-19895, 2020." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 462, + 564, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 462, + 564, + 510 + ], + "spans": [ + { + "bbox": [ + 311, + 462, + 564, + 510 + ], + "type": "text", + "content": "[30] Mara Levy, Siddhant Haldar, Lerrel Pinto, and Abhinav Shirivastava. P3-po: Prescriptive point priors for visuospatial generalization of robot policies. arXiv preprint arXiv:2412.06784, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 510, + 564, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 510, + 564, + 569 + ], + "spans": [ + { + "bbox": [ + 311, + 510, + 564, + 569 + ], + "type": "text", + "content": "[31] Xinhai Li, Jialin Li, Ziheng Zhang, Rui Zhang, Fan Jia, Tiancai Wang, Haoqiang Fan, Kuo-Kun Tseng, and Ruiping Wang. Robogsim: A real2sim2real robotic gaussian splatting simulator. arXiv preprint arXiv:2411.11839, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 570, + 564, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 570, + 564, + 629 + ], + "spans": [ + { + "bbox": [ + 311, + 570, + 564, + 629 + ], + "type": "text", + "content": "[32] Zhihao Liang, Qi Zhang, Ying Feng, Ying Shan, and Kui Jia. Gs-ir: 3d gaussian splatting for inverse rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21644–21653, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 630, + 564, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 630, + 564, + 714 + ], + "spans": [ + { + "bbox": [ + 311, + 630, + 564, + 714 + ], + "type": "text", + "content": "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 714, + 564, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 714, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 311, + 714, + 564, + 727 + ], + "type": "text", + "content": "[34] Guanxing Lu, Shiyi Zhang, Ziwei Wang, Changliu Liu," + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 726 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 67, + 57, + 301, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 57, + 301, + 103 + ], + "spans": [ + { + "bbox": [ + 67, + 57, + 301, + 103 + ], + "type": "text", + "content": "Jiwen Lu, and Yansong Tang. Manigaussian: Dynamic gaussian splatting for multi-task robotic manipulation. In European Conference on Computer Vision, pages 349-366. Springer, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 104, + 301, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 104, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 47, + 104, + 301, + 163 + ], + "type": "text", + "content": "[35] Zhao Mandi, Homanga Bharadhwaj, Vincent Moens, Shuran Song, Aravind Rajeswaran, and Vikash Kumar. Cacti: A framework for scalable multi-task multi-scene visual imitation learning. arXiv preprint arXiv:2212.05711, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 164, + 301, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 164, + 301, + 235 + ], + "spans": [ + { + "bbox": [ + 47, + 164, + 301, + 235 + ], + "type": "text", + "content": "[36] Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Rohun Kulkarni, Li Fei-Fei, Silvio Savarese, Yuke Zhu, and Roberto Martin-Martín. 
What matters in learning from offline human demonstrations for robot manipulation. arXiv preprint arXiv:2108.03298, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 236, + 301, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 295 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 295 + ], + "type": "text", + "content": "[37] Ajay Mandlekar, Soroush Nasiriany, Bowen Wen, Iretiayo Akinola, Yashraj Narang, Linxi Fan, Yuke Zhu, and Dieter Fox. Mimicgen: A data generation system for scalable robot learning using human demonstrations. arXiv preprint arXiv:2310.17596, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 296, + 301, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 301, + 390 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 301, + 390 + ], + "type": "text", + "content": "[38] Octo Model Team, Dibya Ghosh, Homer Walke, Karl Pertsch, Kevin Black, Oier Mees, Sudeep Dasari, Joel Hejna, Charles Xu, Jianlan Luo, Tobias Kreiman, You Liang Tan, Lawrence Yunliang Chen, Pannag Sanketi, Quan Vuong, Ted Xiao, Dorsa Sadigh, Chelsea Finn, and Sergey Levine. Octo: An open-source generalist robot policy. In Proceedings of Robotics: Science and Systems, Delft, Netherlands, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 391, + 301, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 391, + 301, + 450 + ], + "spans": [ + { + "bbox": [ + 47, + 391, + 301, + 450 + ], + "type": "text", + "content": "[39] Abby O'Neill, Abdul Rehman, Abhinav Gupta, Abhiram Maddukuri, Abhishek Gupta, Abhishek Padalkar, Abraham Lee, Acorn Pooley, Agrim Gupta, Ajay Mandlekar, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 451, + 301, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 451, + 301, + 510 + ], + "spans": [ + { + "bbox": [ + 47, + 451, + 301, + 510 + ], + "type": "text", + "content": "[40] Mohammad Nomaan Qureshi, Sparsh Garg, Francisco Yandun, David Held, George Kantor, and Abhisesh Silwal. Splatsim: Zero-shot sim2real transfer of rgb manipulation policies using gaussian splatting. arXiv preprint arXiv:2409.10161, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 510, + 301, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 510, + 301, + 570 + ], + "spans": [ + { + "bbox": [ + 47, + 510, + 301, + 570 + ], + "type": "text", + "content": "[41] Tianhe Ren, Shilong Liu, Ailing Zeng, Jing Lin, Kun-chang Li, He Cao, Jiayu Chen, Xinyu Huang, Yukang Chen, Feng Yan, et al. Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 571, + 301, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 571, + 301, + 640 + ], + "spans": [ + { + "bbox": [ + 47, + 571, + 301, + 640 + ], + "type": "text", + "content": "[42] Boxiang Rong, Artur Grigorev, Wenbo Wang, Michael J Black, Bernhard Thomaszewski, Christina Tsalicoglou, and Otmar Hilliges. Gaussian garments: Reconstructing simulation-ready clothing with photorealistic appearance from multi-view video. arXiv preprint arXiv:2409.08189, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 642, + 301, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 301, + 701 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 301, + 701 + ], + "type": "text", + "content": "[43] Hyunwoo Ryu, Hong-in Lee, Jeong-Hoon Lee, and Jongeun Choi. 
Equivariant descriptor fields: Se (3)-equivariant energy-based models for end-to-end visual robotic manipulation learning. arXiv preprint arXiv:2206.08321, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 702, + 301, + 726 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 702, + 301, + 726 + ], + "spans": [ + { + "bbox": [ + 47, + 702, + 301, + 726 + ], + "type": "text", + "content": "[44] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 57, + 564, + 726 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 331, + 57, + 563, + 68 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 57, + 563, + 68 + ], + "spans": [ + { + "bbox": [ + 331, + 57, + 563, + 68 + ], + "type": "text", + "content": "Computer Vision and Pattern Recognition (CVPR), 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 69, + 564, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 69, + 564, + 116 + ], + "spans": [ + { + "bbox": [ + 310, + 69, + 564, + 116 + ], + "type": "text", + "content": "[45] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 117, + 564, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 117, + 564, + 175 + ], + "spans": [ + { + "bbox": [ + 310, + 117, + 564, + 175 + ], + "type": "text", + "content": "[46] Younggyo Seo, Junsu Kim, Stephen James, Kimin Lee, Jinwoo Shin, and Pieter Abbeel. Multi-view masked world models for visual robotic manipulation. In International Conference on Machine Learning, pages 30613-30632. PMLR, 2023." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 176, + 564, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 176, + 564, + 246 + ], + "spans": [ + { + "bbox": [ + 310, + 176, + 564, + 246 + ], + "type": "text", + "content": "[47] Ola Shorinwa, Johnathan Tucker, Aliyah Smith, Aiden Swann, Timothy Chen, Roya Firoozi, Monroe Kennedy III, and Mac Schwager. Splat-mover: Multi-stage, open-vocabulary robotic manipulation via editable gaussian splatting. arXiv preprint arXiv:2405.04378, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 247, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 247, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 247, + 564, + 319 + ], + "type": "text", + "content": "[48] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. In 2022 International Conference on Robotics and Automation (ICRA), pages 6394-6400. IEEE, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 319, + 564, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 367 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 367 + ], + "type": "text", + "content": "[49] Ritvik Singh, Arthur Allshire, Ankur Handa, Nathan Ratliff, and Karl Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 368, + 564, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 368, + 564, + 415 + ], + "spans": [ + { + "bbox": [ + 310, + 368, + 564, + 415 + ], + "type": "text", + "content": "[50] Stephen Tian, Blake Wulfe, Kyle Sargent, Katherine Liu, Sergey Zakharov, Vitor Guizilini, and Jiajun Wu. 
View-invariant policy learning via zero-shot novel view synthesis. arXiv preprint arXiv:2409.03685, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 415, + 564, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 415, + 564, + 462 + ], + "spans": [ + { + "bbox": [ + 310, + 415, + 564, + 462 + ], + "type": "text", + "content": "[51] Yang Tian, Sizhe Yang, Jia Zeng, Ping Wang, Dahua Lin, Hao Dong, and Jiangmiao Pang. Predictive inverse dynamics models are scalable learners for robotic manipulation. arXiv preprint arXiv:2412.15109, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 463, + 564, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 463, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 463, + 564, + 521 + ], + "type": "text", + "content": "[52] Marcel Torne, Anthony Simeonov, Zechu Li, April Chan, Tao Chen, Abhishek Gupta, and Pulkit Agrawal. Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. arXiv preprint arXiv:2403.03949, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 522, + 564, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 522, + 564, + 558 + ], + "spans": [ + { + "bbox": [ + 310, + 522, + 564, + 558 + ], + "type": "text", + "content": "[53] Pietro Vitiello, Kamil Dreczkowski, and Edward Johns. One-shot imitation learning: A pose estimation perspective. arXiv preprint arXiv:2310.12077, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "type": "text", + "content": "[54] Vitalis Vosylius and Edward Johns. Instant policy: Incontext imitation learning via graph diffusion. arXiv preprint arXiv:2411.12633, 2024." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 595, + 564, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 595, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 310, + 595, + 564, + 642 + ], + "type": "text", + "content": "[55] Hongtao Wu, Ya Jing, Chilam Cheang, Guangzeng Chen, Jiafeng Xu, Xinghang Li, Minghuan Liu, Hang Li, and Tao Kong. Unleashing large-scale video generative pretraining for visual robot manipulation, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "text", + "content": "[56] Yuxuan Wu, Lei Pan, Wenhua Wu, Guangming Wang, Yanzi Miao, and Hesheng Wang. Rl-gsbridge: 3d gaussian splatting based real2sim2real method for robotic manipulation learning. arXiv preprint arXiv:2409.20291, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 702, + 564, + 726 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 702, + 564, + 726 + ], + "spans": [ + { + "bbox": [ + 310, + 702, + 564, + 726 + ], + "type": "text", + "content": "[57] Jianfeng Xiang, Zelong Lv, Sicheng Xu, Yu Deng, Ruicheng Wang, Bowen Zhang, Dong Chen, Xin Tong," + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 727 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 56, + 301, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 56, + 301, + 91 + ], + "spans": [ + { + "bbox": [ + 67, + 56, + 301, + 91 + ], + "type": "text", + "content": "and Jiaolong Yang. Structured 3d latents for scalable and versatile 3d generation. arXiv preprint arXiv:2412.01506, 2024." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 92, + 301, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 92, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 47, + 92, + 301, + 163 + ], + "type": "text", + "content": "[58] Tianyi Xie, Zeshun Zong, Yuxing Qiu, Xuan Li, Yutao Feng, Yin Yang, and Chenfanfu Jiang. Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4389-4398, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 164, + 301, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 164, + 301, + 223 + ], + "spans": [ + { + "bbox": [ + 47, + 164, + 301, + 223 + ], + "type": "text", + "content": "[59] Zhengrong Xue, Shuying Deng, Zhenyang Chen, Yixuan Wang, Zhecheng Yuan, and Huazhe Xu. Demogen: Synthetic demonstration generation for data-efficient visuomotor policy learning. arXiv preprint arXiv:2502.16932, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 224, + 301, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 301, + 282 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 301, + 282 + ], + "type": "text", + "content": "[60] Jingyun Yang, Zi-ang Cao, Congyue Deng, Rika Antonova, Shuran Song, and Jeannette Bohg. Equibot: Sim (3)-equivariant diffusion policy for generalizable and data efficient learning. arXiv preprint arXiv:2407.01479, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 284, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 301, + 354 + ], + "type": "text", + "content": "[61] Jingyun Yang, Congyue Deng, Jimmy Wu, Rika Antonova, Leonidas Guibas, and Jeannette Bohg. Equiv-act: Sim (3)-equivariant visuomotor policies beyond rigid object manipulation. 
In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 9249–9255. IEEE, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 355, + 301, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 355, + 301, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 355, + 301, + 426 + ], + "type": "text", + "content": "[62] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 426, + 301, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 301, + 474 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 301, + 474 + ], + "type": "text", + "content": "[63] Sizhe Yang, Yanjie Ze, and Huazhe Xu. Movie: Visual model-based policy adaptation for view generalization. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 475, + 301, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 475, + 301, + 545 + ], + "spans": [ + { + "bbox": [ + 47, + 475, + 301, + 545 + ], + "type": "text", + "content": "[64] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20331-20341, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 547, + 301, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 547, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 547, + 301, + 594 + ], + "type": "text", + "content": "[65] Mingqiao Ye, Martin Danelljan, Fisher Yu, and Lei Ke. 
Gaussian grouping: Segment and edit anything in 3d scenes. In European Conference on Computer Vision, pages 162-179. Springer, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 595, + 301, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 595, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 595, + 301, + 653 + ], + "type": "text", + "content": "[66] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 654, + 301, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 713 + ], + "type": "text", + "content": "[67] Tianhe Yu, Ted Xiao, Austin Stone, Jonathan Tompson, Anthony Brohan, Su Wang, Jaspiar Singh, Clayton Tan, Jodilyn Peralta, Brian Ichter, et al. Scaling robot learning with semantically imagined experience. arXiv preprint arXiv:2302.11550, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 714, + 301, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 714, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 714, + 301, + 727 + ], + "type": "text", + "content": "[68] Chengbo Yuan, Suraj Joshi, Shaoting Zhu, Hang Su," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 564, + 260 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 331, + 56, + 564, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 56, + 564, + 103 + ], + "spans": [ + { + "bbox": [ + 331, + 56, + 564, + 103 + ], + "type": "text", + "content": "Hang Zhao, and Yang Gao. 
Roboengine: Plug-and-play robot data augmentation with semantic robot segmentation and background generation. arXiv preprint arXiv:2503.18738, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 104, + 564, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 104, + 564, + 163 + ], + "spans": [ + { + "bbox": [ + 310, + 104, + 564, + 163 + ], + "type": "text", + "content": "[69] Zhecheng Yuan, Tianming Wei, Shuiqi Cheng, Gu Zhang, Yuanpei Chen, and Huazhe Xu. Learning to manipulate anywhere: A visual generalizable framework for reinforcement learning. arXiv preprint arXiv:2407.15815, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 164, + 564, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 164, + 564, + 200 + ], + "spans": [ + { + "bbox": [ + 310, + 164, + 564, + 200 + ], + "type": "text", + "content": "[70] Xinyu Zhang and Abdeslam Boullarias. One-shot imitation learning with invariance matching for robotic manipulation. arXiv preprint arXiv:2405.13178, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 200, + 564, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 200, + 564, + 260 + ], + "spans": [ + { + "bbox": [ + 310, + 200, + 564, + 260 + ], + "type": "text", + "content": "[71] Yuhang Zheng, Xiangyu Chen, Yupeng Zheng, Songen Gu, Runyi Yang, Bu Jin, Pengfei Li, Chengliang Zhong, Zengmao Wang, Lina Liu, et al. Gaussiangrasper: 3d language gaussian splatting for open-vocabulary robotic grasping. arXiv preprint arXiv:2403.09637, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 150, + 57, + 197, + 67 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 57, + 197, + 67 + ], + "spans": [ + { + "bbox": [ + 150, + 57, + 197, + 67 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 74, + 288, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 74, + 288, + 87 + ], + "spans": [ + { + "bbox": [ + 45, + 74, + 288, + 87 + ], + "type": "text", + "content": "A. Applying Transformation and Scaling to 3D Gaussians" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 91, + 300, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 91, + 300, + 114 + ], + "spans": [ + { + "bbox": [ + 45, + 91, + 300, + 114 + ], + "type": "text", + "content": "This section outlines how to apply transformations (translation, rotation) and scaling to 3D Gaussians." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 115, + 301, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 115, + 301, + 199 + ], + "spans": [ + { + "bbox": [ + 45, + 115, + 301, + 199 + ], + "type": "text", + "content": "The Gaussian primitive typically possesses three core properties: 1) a center position in three-dimensional space; 2) an orientation that specifies the tilt of its principal axes, commonly represented as a quaternion; 3) a scale indicating its width or narrowness. Additionally, Gaussian primitives can be enhanced with Spherical Harmonics (SH) to capture complex, direction-dependent color features." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 199, + 301, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 199, + 301, + 283 + ], + "spans": [ + { + "bbox": [ + 45, + 199, + 301, + 283 + ], + "type": "text", + "content": "When applying a transformation to the Gaussian primitive, the following steps should be taken: 1) update the center position by scaling, rotating, and then adding the translation offset; 2) update the orientation by combining the existing rotation with the new rotation; 3) adjust the scale by multiplying by the scaling factor; 4) rotate the Spherical Harmonics coefficients by using the Wigner D matrices." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 293, + 260, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 293, + 260, + 306 + ], + "spans": [ + { + "bbox": [ + 45, + 293, + 260, + 306 + ], + "type": "text", + "content": "B. Details of Demonstration Augmentation Process" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 310, + 300, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 310, + 300, + 346 + ], + "spans": [ + { + "bbox": [ + 45, + 310, + 300, + 346 + ], + "type": "text", + "content": "We expand on the details of the demonstration augmentation process in this section. An illustration of augmented demonstrations is provided in Fig. 12." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 347, + 121, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 347, + 121, + 358 + ], + "spans": [ + { + "bbox": [ + 57, + 347, + 121, + 358 + ], + "type": "text", + "content": "1) Object pose" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 359, + 301, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 359, + 301, + 418 + ], + "spans": [ + { + "bbox": [ + 45, + 359, + 301, + 418 + ], + "type": "text", + "content": "As mentioned in Sec. 
IV-B1, we transform the end-effector poses at key frames equivariantly according to the transformation that is applied to the target object. However, considering the symmetry of the gripper, we perform post-processing on the transformed end-effector pose." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "text", + "content": "Suppose the rotation of the transformed end-effector pose can be expressed as " + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "inline_equation", + "content": "(r_x, r_y, r_z)" + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "text", + "content": " in the format of XYZ Euler angles. We replace " + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "inline_equation", + "content": "r_z" + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "inline_equation", + "content": "r_z'" + }, + { + "bbox": [ + 45, + 419, + 301, + 456 + ], + "type": "text", + "content": ", which can be calculated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 470, + 238, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 470, + 238, + 515 + ], + "spans": [ + { + "bbox": [ + 107, + 470, + 238, + 515 + ], + "type": "interline_equation", + "content": "r _ {z} ^ {\\prime} = \\left\\{ \\begin{array}{l l} r _ {z} & - \\frac {\\pi}{2} \\leqslant r _ {z} \\leqslant \\frac {\\pi}{2} \\\\ r _ {z} + \\pi & r _ {z} < - \\frac {\\pi}{2} \\\\ r _ {z} - \\pi & r _ {z} > \\frac {\\pi}{2}. 
\\end{array} \\right.", + "image_path": "07c3f6229256043d17b3a2f319c0a1d3515f59cb4893de684e02b0fed26d18e1.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "spans": [ + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "text", + "content": "The resulting Euler angles " + }, + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "inline_equation", + "content": "(r_x, r_y, r_z')" + }, + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "text", + "content": " form the final rotation of the end-effector, which prevents the end-effector from performing redundant rotation along its " + }, + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 45, + 520, + 299, + 556 + ], + "type": "text", + "content": "-axis." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 557, + 127, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 557, + 127, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 557, + 127, + 567 + ], + "type": "text", + "content": "2) Camera view" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": "As aforementioned in Sec. V-D3, we enumerate the hyperparameters of camera view augmentations and their range of randomization in Table III. 
Suppose the camera view in the expert demonstration has target point " + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "inline_equation", + "content": "O_{c}^{\\mathrm{expert}} = (x_{c}^{0},y_{c}^{0},z_{c}^{0})" + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": " and corresponding spherical coordinates " + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "inline_equation", + "content": "(r^0,\\theta^0,\\varphi^0)" + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": ". Thereby, the target point " + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "inline_equation", + "content": "O_{c} = (x_{c},y_{c},z_{c})" + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": " and corresponding spherical coordinates " + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "inline_equation", + "content": "(r,\\theta ,\\varphi)" + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": " are sampled from uniform distributions, ranging between " + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "inline_equation", + "content": "(x_c^0\\pm \\Delta x_c,y_c^0\\pm \\Delta y_c,z_c^0\\pm \\Delta z_c,r^0\\pm \\Delta r,\\theta^0\\pm \\Delta \\theta ,\\varphi^0\\pm \\Delta \\varphi)" + }, + { + "bbox": [ + 45, + 569, + 301, + 677 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 678, + 148, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 678, + 148, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 678, + 148, + 689 + ], + "type": "text", + "content": "3) Lighting condition" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 45, + 689, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 727 + ], + "type": "text", + "content": "We present the hyperparameters of lighting condition augmentation in this section. First, we normalize the RGB values of each pixel with minimum value 0 and maximum value 1." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 375, + 82, + 495, + 182 + ], + "blocks": [ + { + "bbox": [ + 308, + 53, + 563, + 76 + ], + "lines": [ + { + "bbox": [ + 308, + 53, + 563, + 76 + ], + "spans": [ + { + "bbox": [ + 308, + 53, + 563, + 76 + ], + "type": "text", + "content": "TABLE III: Camera view augmentation hyperparameters and their range of randomization." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 375, + 82, + 495, + 182 + ], + "lines": [ + { + "bbox": [ + 375, + 82, + 495, + 182 + ], + "spans": [ + { + "bbox": [ + 375, + 82, + 495, + 182 + ], + "type": "table", + "html": "
HyperparameterValue
Δxc0.1(m)
Δyc0.1(m)
Δzc0.1(m)
Δr0.2(m)
Δθπ/6
Δφπ/6
", + "image_path": "a41aa0ae08cfe0f40d88521de28cdd9b71e1cb3a141d22dbcaafc61c97e8a93c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 205, + 563, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 205, + 563, + 230 + ], + "spans": [ + { + "bbox": [ + 308, + 205, + 563, + 230 + ], + "type": "text", + "content": "Then, we stipulate that the hyperparameters are sampled from the following distributions:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 358, + 247, + 512, + 292 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 247, + 512, + 292 + ], + "spans": [ + { + "bbox": [ + 358, + 247, + 512, + 292 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left(\\Delta_ {r}, \\Delta_ {g}, \\Delta_ {b}\\right) \\sim \\mathcal {N} (\\mathbf {0}, 0. 1 ^ {2} \\mathbf {I}), \\\\ s _ {r}, s _ {g}, s _ {b} \\sim \\text {U n i f o r m} (0. 3, 1. 8), \\\\ o _ {r}, o _ {g}, o _ {b} \\sim \\text {U n i f o r m} (- 0. 3, 0. 3). \\\\ \\end{array}", + "image_path": "252ddea6586c69789ae6820fda4aa0db728af581d55c74106f6d42b8189207cf.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 308, + 404, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 308, + 404, + 320 + ], + "spans": [ + { + "bbox": [ + 309, + 308, + 404, + 320 + ], + "type": "text", + "content": "C. Policy Architecture" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 325, + 563, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 325, + 563, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 563, + 409 + ], + "type": "text", + "content": "As illustrated in Fig. 13, the policy processes two types of inputs: images and robot states. We use different encoders to tokenize each modality accordingly. 
For image inputs, the images are first passed through a ResNet-18 vision encoder to generate visual embeddings. We employ a linear layer to extract compact visual features. For the robot state, we encode it into state tokens using a multi-layer perceptron (MLP)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 410, + 563, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 410, + 563, + 481 + ], + "spans": [ + { + "bbox": [ + 307, + 410, + 563, + 481 + ], + "type": "text", + "content": "The multi-modal encoder in our model is based on a GPT-2 style transformer architecture. Before feeding the sequential image and state tokens into the transformer, we append readout tokens [ACT] to the end. These readout tokens attend to embeddings from different modalities, serving as action latents used for action prediction." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 482, + 563, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 482, + 563, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 482, + 563, + 566 + ], + "type": "text", + "content": "Encoded by the multi-modal encoder, the action latents generated by the [ACT] tokens are fed into the readout decoders to predict actions. The action decoder utilizes an MLP to transform the action latent into the action vector. We predict a chunk of 10 future actions. Compared to single-step action prediction, predicting multiple steps provides temporal action consistency and robustness to idle actions [11]." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 577, + 394, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 577, + 394, + 589 + ], + "spans": [ + { + "bbox": [ + 309, + 577, + 394, + 589 + ], + "type": "text", + "content": "D. 
Training Details" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 594, + 563, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 594, + 563, + 676 + ], + "spans": [ + { + "bbox": [ + 307, + 594, + 563, + 676 + ], + "type": "text", + "content": "During training, the input at each timestep consists of two images captured from two eye-on-base cameras, along with the robot state. The robot state includes both the arm state and the gripper state. The gripper state is binary, indicating whether the gripper is open or closed. For the Franka FR3 robot, the arm state is 7-dimensional, while for the UR5e robot, it is 6-dimensional." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 677, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 564, + 727 + ], + "type": "text", + "content": "The policy operates with a history length of 1, and the size of the action chunk is set to 10. During inference, we utilize temporal ensemble techniques to compute a weighted average of the multi-step actions." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 50, + 537, + 518 + ], + "blocks": [ + { + "bbox": [ + 70, + 50, + 537, + 518 + ], + "lines": [ + { + "bbox": [ + 70, + 50, + 537, + 518 + ], + "spans": [ + { + "bbox": [ + 70, + 50, + 537, + 518 + ], + "type": "image", + "image_path": "3d6896c1cb137ca01211f8119d84a7c86a3674a73e95b5832cbeaff0625eac10.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 521, + 564, + 547 + ], + "lines": [ + { + "bbox": [ + 45, + 521, + 564, + 547 + ], + "spans": [ + { + "bbox": [ + 45, + 521, + 564, + 547 + ], + "type": "text", + "content": "Fig. 12: Illustration of augmented demonstrations. 
Type of generalization from the top row to the bottom row: object pose, lighting condition, scene appearance, object type, camera view, and embodiment type." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 563, + 300, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 563, + 300, + 624 + ], + "spans": [ + { + "bbox": [ + 45, + 563, + 300, + 624 + ], + "type": "text", + "content": "The policy is trained using a single NVIDIA RTX 4090 GPU, with a batch size of 256 and a learning rate of 1e-4. Depending on the number of demonstrations, the policy is trained for varying numbers of epochs. The hyperparameters used during training are detailed in Table IV." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 628, + 253, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 628, + 253, + 640 + ], + "spans": [ + { + "bbox": [ + 45, + 628, + 253, + 640 + ], + "type": "text", + "content": "E. Illustration of Real-World Experiment Settings" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 643, + 300, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 643, + 300, + 690 + ], + "spans": [ + { + "bbox": [ + 45, + 643, + 300, + 690 + ], + "type": "text", + "content": "We illustrate the experiment settings on lighting condition generalization in Fig. 14. The flashing light alternates between red and blue light at a frequency of " + }, + { + "bbox": [ + 45, + 643, + 300, + 690 + ], + "type": "inline_equation", + "content": "4\\mathrm{Hz}" + }, + { + "bbox": [ + 45, + 643, + 300, + 690 + ], + "type": "text", + "content": ". Every lighting condition takes up 6 trials in a single experiment." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 691, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 691, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 691, + 301, + 727 + ], + "type": "text", + "content": "Besides, we present the real-world settings on appearance generalization in Fig. 15. Each scenario accounts for 5 trials in a single experiment." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 323, + 577, + 547, + 693 + ], + "blocks": [ + { + "bbox": [ + 323, + 577, + 547, + 693 + ], + "lines": [ + { + "bbox": [ + 323, + 577, + 547, + 693 + ], + "spans": [ + { + "bbox": [ + 323, + 577, + 547, + 693 + ], + "type": "image", + "image_path": "769c1cb609e8187e90aa9a54525152cb88b18d9c939ca5cea3edcf9e9d686ff0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 697, + 499, + 710 + ], + "lines": [ + { + "bbox": [ + 375, + 697, + 499, + 710 + ], + "spans": [ + { + "bbox": [ + 375, + 697, + 499, + 710 + ], + "type": "text", + "content": "Fig. 13: Policy architecture." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 201, + 87, + 405, + 367 + ], + "blocks": [ + { + "bbox": [ + 206, + 69, + 403, + 82 + ], + "lines": [ + { + "bbox": [ + 206, + 69, + 403, + 82 + ], + "spans": [ + { + "bbox": [ + 206, + 69, + 403, + 82 + ], + "type": "text", + "content": "TABLE IV: Policy training hyperparameters." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 201, + 87, + 405, + 367 + ], + "lines": [ + { + "bbox": [ + 201, + 87, + 405, + 367 + ], + "spans": [ + { + "bbox": [ + 201, + 87, + 405, + 367 + ], + "type": "table", + "html": "
Batch Size256
Learning Rate1e-4
Training Epochs1400 (100 demonstrations)
1000 (200 demonstrations)
800 (400 demonstrations)
700 (800 demonstrations)
500 (1800 demonstrations)
300 (3200 demonstrations)
200 (6400 demonstrations)
Image Size128*128
OptimizerAdamW
History Length1
Action Chunk Length10
", + "image_path": "634185e845433b270dbf4c6ed6419b70d4472463ac896032f1924fce80c1f39d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 70, + 414, + 225, + 530 + ], + "blocks": [ + { + "bbox": [ + 70, + 414, + 225, + 530 + ], + "lines": [ + { + "bbox": [ + 70, + 414, + 225, + 530 + ], + "spans": [ + { + "bbox": [ + 70, + 414, + 225, + 530 + ], + "type": "image", + "image_path": "59207633f7c3959f655ac6bb14b32176d3c231d6318185a78be5b3be508d1b8a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 534, + 187, + 544 + ], + "lines": [ + { + "bbox": [ + 107, + 534, + 187, + 544 + ], + "spans": [ + { + "bbox": [ + 107, + 534, + 187, + 544 + ], + "type": "text", + "content": "(a) Flashing light (Red)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 228, + 414, + 383, + 530 + ], + "blocks": [ + { + "bbox": [ + 228, + 414, + 383, + 530 + ], + "lines": [ + { + "bbox": [ + 228, + 414, + 383, + 530 + ], + "spans": [ + { + "bbox": [ + 228, + 414, + 383, + 530 + ], + "type": "image", + "image_path": "16dd2a576111e4901e9f33a58f76893230b27ec24632b63a391010d321ebb65c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 534, + 346, + 544 + ], + "lines": [ + { + "bbox": [ + 264, + 534, + 346, + 544 + ], + "spans": [ + { + "bbox": [ + 264, + 534, + 346, + 544 + ], + "type": "text", + "content": "(b) Flashing light (Blue)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 385, + 414, + 541, + 530 + ], + "blocks": [ + { + "bbox": [ + 385, + 414, + 541, + 530 + ], + "lines": [ + { + "bbox": [ + 385, + 414, + 541, + 530 + ], + "spans": [ + { + "bbox": [ + 385, + 414, + 541, + 530 + ], + "type": "image", + "image_path": 
"329b55cea8fe8b03a6e530fbf3a83fabe0ed3b487f4a86120899ab12039a36be.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 534, + 487, + 544 + ], + "lines": [ + { + "bbox": [ + 440, + 534, + 487, + 544 + ], + "spans": [ + { + "bbox": [ + 440, + 534, + 487, + 544 + ], + "type": "text", + "content": "(c) Dark light" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 70, + 552, + 225, + 670 + ], + "blocks": [ + { + "bbox": [ + 70, + 552, + 225, + 670 + ], + "lines": [ + { + "bbox": [ + 70, + 552, + 225, + 670 + ], + "spans": [ + { + "bbox": [ + 70, + 552, + 225, + 670 + ], + "type": "image", + "image_path": "13dcaa9e3a9dfa6d3ab14b9ff9eb273533771f796df5a8d843bd8476f6aafff4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 673, + 174, + 684 + ], + "lines": [ + { + "bbox": [ + 121, + 673, + 174, + 684 + ], + "spans": [ + { + "bbox": [ + 121, + 673, + 174, + 684 + ], + "type": "text", + "content": "(d) Bright light" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 228, + 552, + 383, + 670 + ], + "blocks": [ + { + "bbox": [ + 228, + 552, + 383, + 670 + ], + "lines": [ + { + "bbox": [ + 228, + 552, + 383, + 670 + ], + "spans": [ + { + "bbox": [ + 228, + 552, + 383, + 670 + ], + "type": "image", + "image_path": "d5097ed7bc541524999fba782271c6bc4995c4fd1d3def8c5567bd83a2267a53.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 280, + 673, + 331, + 684 + ], + "lines": [ + { + "bbox": [ + 280, + 673, + 331, + 684 + ], + "spans": [ + { + "bbox": [ + 280, + 673, + 331, + 684 + ], + "type": "text", + "content": "(e) Green light" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 385, + 
552, + 541, + 670 + ], + "blocks": [ + { + "bbox": [ + 385, + 552, + 541, + 670 + ], + "lines": [ + { + "bbox": [ + 385, + 552, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 385, + 552, + 541, + 670 + ], + "type": "image", + "image_path": "c977fae0b468eff6be7f5aa6a8aee6bcc3d814a04aeddbe4bf91e1d72ad21c80.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 673, + 489, + 684 + ], + "lines": [ + { + "bbox": [ + 436, + 673, + 489, + 684 + ], + "spans": [ + { + "bbox": [ + 436, + 673, + 489, + 684 + ], + "type": "text", + "content": "(f) Yellow light" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 146, + 693, + 463, + 706 + ], + "lines": [ + { + "bbox": [ + 146, + 693, + 463, + 706 + ], + "spans": [ + { + "bbox": [ + 146, + 693, + 463, + 706 + ], + "type": "text", + "content": "Fig. 14: Illustration of real-world experiment on lighting generalization." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 243, + 225, + 361 + ], + "blocks": [ + { + "bbox": [ + 69, + 243, + 225, + 361 + ], + "lines": [ + { + "bbox": [ + 69, + 243, + 225, + 361 + ], + "spans": [ + { + "bbox": [ + 69, + 243, + 225, + 361 + ], + "type": "image", + "image_path": "2e0fd12ea391852ff252c6fa013f58c83c31631f5f61d1967a0de0a5897373ab.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 366, + 151, + 374 + ], + "lines": [ + { + "bbox": [ + 140, + 366, + 151, + 374 + ], + "spans": [ + { + "bbox": [ + 140, + 366, + 151, + 374 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 227, + 243, + 383, + 361 + ], + "blocks": [ + { + "bbox": [ 
+ 227, + 243, + 383, + 361 + ], + "lines": [ + { + "bbox": [ + 227, + 243, + 383, + 361 + ], + "spans": [ + { + "bbox": [ + 227, + 243, + 383, + 361 + ], + "type": "image", + "image_path": "d9cd8ada7b70be3c181ebc936eaa3c6cb7f99d355e76e7d0442c35452458b5ee.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 366, + 309, + 374 + ], + "lines": [ + { + "bbox": [ + 298, + 366, + 309, + 374 + ], + "spans": [ + { + "bbox": [ + 298, + 366, + 309, + 374 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 384, + 243, + 542, + 361 + ], + "blocks": [ + { + "bbox": [ + 384, + 243, + 542, + 361 + ], + "lines": [ + { + "bbox": [ + 384, + 243, + 542, + 361 + ], + "spans": [ + { + "bbox": [ + 384, + 243, + 542, + 361 + ], + "type": "image", + "image_path": "7fb3bdba025175d006730e46359f428af9eb04d23d00c54192248e1fc363c727.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 456, + 366, + 466, + 374 + ], + "lines": [ + { + "bbox": [ + 456, + 366, + 466, + 374 + ], + "spans": [ + { + "bbox": [ + 456, + 366, + 466, + 374 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 69, + 382, + 226, + 501 + ], + "blocks": [ + { + "bbox": [ + 69, + 382, + 226, + 501 + ], + "lines": [ + { + "bbox": [ + 69, + 382, + 226, + 501 + ], + "spans": [ + { + "bbox": [ + 69, + 382, + 226, + 501 + ], + "type": "image", + "image_path": "713492fd54f41dfe9c9392349b53cc107ba56211f668b2e7a40851b6a50c577b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 505, + 151, + 514 + ], + "lines": [ + { + "bbox": [ + 140, + 505, + 151, + 514 + ], + "spans": [ + { + "bbox": [ + 140, + 505, + 151, + 514 + ], + "type": "text", + "content": 
"(d)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 227, + 382, + 383, + 501 + ], + "blocks": [ + { + "bbox": [ + 227, + 382, + 383, + 501 + ], + "lines": [ + { + "bbox": [ + 227, + 382, + 383, + 501 + ], + "spans": [ + { + "bbox": [ + 227, + 382, + 383, + 501 + ], + "type": "image", + "image_path": "ce3b3d5d5e34b5db3092a19eba1ca889fe0cf2d6719286ac72de291560196841.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 506, + 309, + 514 + ], + "lines": [ + { + "bbox": [ + 298, + 506, + 309, + 514 + ], + "spans": [ + { + "bbox": [ + 298, + 506, + 309, + 514 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 384, + 382, + 542, + 501 + ], + "blocks": [ + { + "bbox": [ + 384, + 382, + 542, + 501 + ], + "lines": [ + { + "bbox": [ + 384, + 382, + 542, + 501 + ], + "spans": [ + { + "bbox": [ + 384, + 382, + 542, + 501 + ], + "type": "image", + "image_path": "b3bb47439709f6859a0518f05864ad27d39444c7291615594c3434026ef8983f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 505, + 466, + 514 + ], + "lines": [ + { + "bbox": [ + 457, + 505, + 466, + 514 + ], + "spans": [ + { + "bbox": [ + 457, + 505, + 466, + 514 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 137, + 523, + 472, + 536 + ], + "lines": [ + { + "bbox": [ + 137, + 523, + 472, + 536 + ], + "spans": [ + { + "bbox": [ + 137, + 523, + 472, + 536 + ], + "type": "text", + "content": "Fig. 15: Illustration of real-world experiment on appearance generalization." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_content_list.json b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..bc336553e49371e0256a9df14054e52473971a54 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_content_list.json @@ -0,0 +1,1918 @@ +[ + { + "type": "text", + "text": "IMAGGarment: Fine-Grained Garment Generation for Controllable Fashion Design", + "text_level": 1, + "bbox": [ + 91, + 70, + 903, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fei Shen, Jian Yu, Cong Wang, Xin Jiang, Xiaoyu Du, and Jinhui Tang, Senior Member, IEEE", + "bbox": [ + 140, + 147, + 848, + 164 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—This paper presents IMAGGarment, a fine-grained garment generation (FGG) framework that enables high-fidelity garment synthesis with precise control over silhouette, color, and logo placement. Unlike existing methods that are limited to single-condition inputs, IMAGGarment addresses the challenges of multi-conditional controllability in personalized fashion design and digital apparel applications. Specifically, IMAGGarment employs a two-stage training strategy to separately model global appearance and local details, while enabling unified and controllable generation through end-to-end inference. In the first stage, we propose a global appearance model that jointly encodes silhouette and color using a mixed attention module and a color adapter. 
In the second stage, we present a local enhancement model with an adaptive appearance-aware module to inject user-defined logos and spatial constraints, enabling accurate placement and visual consistency. To support this task, we release GarmentBench, a large-scale dataset comprising over 180K garment samples paired with multi-level design conditions, including sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that our method outperforms existing baselines, achieving superior structural stability, color fidelity, and local controllability performance. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment.", + "bbox": [ + 73, + 220, + 491, + 525 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Fine-Grained Garment Generation, Multi-Conditional Generation, Fashion Design Applications, Garment-Bench Dataset.", + "bbox": [ + 73, + 529, + 491, + 569 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 592, + 351, + 606 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fine-Grained garment generation (FGG) aims to synthesize high-quality garments with precise control over garment silhouette, color scheme, logo content, and spatial placement. 
As personalized fashion and the digital apparel market grow rapidly, fine-grained controllability [1]–[4] is increasingly crucial for applications in fashion design and e-commerce.", + "bbox": [ + 73, + 612, + 490, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In traditional garment ideation [5], [6] and visualization [7], [8], designers analyze line drawings to establish silhouette and construction, then select color palettes and materials, and", + "bbox": [ + 73, + 703, + 491, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fei Shen is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the NExT++ Research Centre, National University of Singapore, Singapore, e-mail: shenfei29@nus.edu.sg", + "bbox": [ + 73, + 761, + 488, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jian Yu, Xin Jiang, and Xiaoyu Du are with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China. e-mail: jianyu@njust.edu.cn; xinjiang@njust.edu.cn; duxy@njust.edu.cn.", + "bbox": [ + 73, + 806, + 491, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cong Wang is with the State Key Laboratory for Novel Software Technology and the School of Computer Science, Nanjing University, Nanjing, 210023, China. e-mail: cw@smail.nju.edu.cn", + "bbox": [ + 73, + 852, + 491, + 886 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinhui Tang is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the College of Information Science and Technology and Artificial Intelligence, Nanjing Forestry University, Nanjing 210037, China, e-mail: jinhuitang@njust.edu.cn. 
(Corresponding author: Jinhui Tang.)", + "bbox": [ + 73, + 886, + 491, + 944 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "finally arrange brand elements such as logos and trims. This manual workflow has two persistent drawbacks. First, it is time consuming: to match the specification, edits must be applied object by object and view by view; in a seasonal collection, even identical panels within the same board are recolored or relabeled one at a time, which does not scale. Second, it is error prone and inconsistent: small deviations in hue, shading, or logo placement arise across artists and rounds of revision, yielding mismatches across styles, sizes, and camera viewpoints. As project scope grows, these issues inflate turnaround time and complicate quality control and version management.", + "bbox": [ + 501, + 219, + 921, + 400 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, image synthesis [9]–[12] has made notable progress in tasks such as sketch-to-image generation [13]–[16] and logo insertion [17]–[19] (as illustrated in Fig. 1 (a)), demonstrating basic capabilities in structural and content-level control. However, these tasks [13], [17], [20] provide only coarse guidance and rely on single-condition inputs (e.g., sketch or color), lacking the fine-grained controllability needed to model the nuanced interactions between global structure and local details in garment design. Although sequential or modular combinations may offer partial solutions, they [21]–[23] fail to explicitly disentangle and jointly model global attributes (e.g., silhouette, color) and local appearance details (e.g., logo content and spatial placement). Without unified control mechanisms, these approaches [21]–[23] often suffer from condition entanglement, conflicting objectives, and visual inconsistencies, ultimately falling short of the high standards required in real-world fashion design. 
In contrast, practical fashion design [5], [6] requires joint control over multiple interdependent factors: designers determine global attributes such as silhouette and color, followed by fine-tuning of local elements like logos and their placement. To support this process, a unified generation task that clearly separates and coordinates global and local attributes is essential for controllable and high-fidelity synthesis.", + "bbox": [ + 501, + 401, + 921, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address these limitations, we propose a new task: fine-grained garment generation (FGG), as illustrated in Fig. 1 (b). FGG is formulated as a unified multi-conditional garment synthesis task, taking a textual prompt, garment silhouette, color palette, and spatially constrained logos as joint inputs. It aims to generate garments that faithfully reflect high-level structural intent and fine-grained local styling cues. FGG is specifically designed to mirror real-world fashion workflows, where designers must coordinate diverse input modalities to express creative intent. Unlike conventional approaches that process each condition independently or sequentially, FGG emphasizes joint modeling and hierarchical reasoning across", + "bbox": [ + 501, + 763, + 921, + 944 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13176v2 [cs.CV] 8 Sep 2025", + "bbox": [ + 22, + 282, + 60, + 710 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 56, + 292, + 196 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 56, + 496, + 196 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg", + "image_caption": [ + "(a) Sketch-to-image and logo insertion task", + "(c) Generalization capability in real-world applications" + ], + "image_footnote": [], + "bbox": [ + 99, + 224, + 496, + 441 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg", + "image_caption": [ + "(b) Fine-grained garment generation task" + ], + "image_footnote": [], + "bbox": [ + 531, + 55, + 880, + 207 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg", + "image_caption": [ + "Fig. 1. Comparison of (a) existing sketch-to-image and logo insertion tasks with (b) our proposed fine-grained garment generation (FGG) task, which enables precise and controllable synthesis of garment structure, color, logo, and spatial placement. Unlike previous tasks that rely on a single input condition, FGG is tailored for real-world fashion design workflows by integrating multiple conditional controls." 
+ ], + "image_footnote": [], + "bbox": [ + 529, + 224, + 898, + 441 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "input types. It goes beyond simple task combinations by enforcing consistent integration of global and local attributes within a unified generation framework, enabling nuanced control over the overall structure and detailed appearance. Specifically, FGG task introduces three key challenges: (1) maintaining visual and semantic consistency across heterogeneous input conditions, (2) resolving conflicts between global structures and localized visual elements, and (3) generalizing to unseen condition combinations without retraining (see Fig. 1(c)). FGG thus marks a fundamental shift from single-condition or loosely coupled pipelines toward a unified, design-intent-driven generation paradigm that better reflects the complexity of real-world garment design.", + "bbox": [ + 73, + 534, + 491, + 731 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose IMAGGarment, a two-stage training and end-to-end inference framework tailored for fine-grained garment generation. Unlike prior methods that rely on single-condition inputs or simple feature fusion, our framework is explicitly designed to achieve fine-grained controllability under multiple, interdependent constraints. In the first stage, we propose a global appearance model with a mixed attention module and a color adapter to jointly encode garment silhouette and color palette, improving overall appearance fidelity and mitigating condition entanglement. In the second stage, we present a local enhancement model equipped with an adaptive appearance-aware module to inject user-defined logos and their spatial constraints, enabling precise logo placement while preserving global consistency. 
To further promote research in", + "bbox": [ + 73, + 733, + 491, + 945 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "this direction, we release GarmentBench, a large-scale dataset comprising over 180k garment samples annotated with rich multi-level design conditions, including silhouette sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that IMAGGarment significantly outperforms existing baselines in terms of structural stability and local controllability. To summarize, the main contributions are listed as follows:", + "bbox": [ + 501, + 534, + 921, + 654 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose IMAGGarment, a controllable garment generation framework that enables precise control over garment structure, color, and logo placement, addressing the challenges of FGG.", + "- We design a mixed attention module, color adapter, and adaptive appearance-aware module to disentangle global structure from local attributes, achieving fine-grained visual control and accurate spatial control.", + "- We release GarmentBench, a large-scale dataset with diverse garments and rich multi-conditional annotations, serving as a valuable benchmark for controllable garment generation research." + ], + "bbox": [ + 519, + 656, + 921, + 837 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The remainder of this paper is organized as follows. Section II surveys prior work on garment generation, encompassing GAN-based techniques and diffusion-based controllable generation. Section III describes the proposed IMAGGarment methodology, comprising a global appearance model with mixed attention and a color adapter, a local enhancement model with the A3 module, and the associated training and", + "bbox": [ + 503, + 839, + 921, + 945 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "inference strategies. Section IV presents the experimental protocol and results, including the GarmentBench dataset and evaluation metrics, implementation details, and results and analysis. Section V concludes the paper.", + "bbox": [ + 73, + 69, + 491, + 131 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "II. RELATED WORK", + "text_level": 1, + "bbox": [ + 209, + 143, + 357, + 157 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. GAN-Based Methods", + "text_level": 1, + "bbox": [ + 73, + 164, + 243, + 178 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Early approaches [24]–[29] to garment generation predominantly build on generative adversarial networks (GANs) [30]–[32], with a major line devoted to sketch-to-image translation [33] that learns spatial mappings from structural cues. Representative systems such as DeepFaceDrawing [24] and DeepFaceEditing [25] decompose sketches into semantic components and progressively assemble photorealistic results, while DeepPortraitDrawing [26] extends this paradigm to full-body synthesis via local-to-global pipelines. Interactive frameworks [27] further introduce gating mechanisms for user-guided editing, and DALColor [34] combines WGAN-GP [35] with line-art colorization for refined appearance control. 
Beyond sketches, related GAN-based efforts explore pose- or part-guided generation [36], [37], leveraging learned warping or deformable alignment to better propagate structural constraints from sources to targets.", + "bbox": [ + 73, + 181, + 491, + 425 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, these methods [24]–[27] are largely restricted to single-condition settings (e.g., sketches or poses alone), making it difficult to support real-world fashion scenarios that require joint control over multiple factors such as silhouette, garment layers, color/pattern, and local embellishments. Moreover, adversarial training is prone to instability and visual artifacts [32], [36], [37], and the reliance on paired or carefully aligned supervision limits robustness to occlusion, diverse body shapes, and open-world catalogs. As a result, while GAN-based pipelines can produce plausible textures under constrained conditions, they struggle to achieve reliable, fine-grained, and multi-conditional controllability at scale.", + "bbox": [ + 73, + 424, + 493, + 608 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Diffusion-Based Methods", + "text_level": 1, + "bbox": [ + 73, + 625, + 269, + 638 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Diffusion models [38]–[40] have achieved strong progress in conditional image generation owing to their iterative denoising process and flexible conditioning interfaces. To improve controllability with minimal modification to large backbones, plugin-based approaches such as IP-Adapter [21], ControlNet [22], and BLIP-Diffusion [41] inject external conditions (e.g., reference images, structural maps, or language cues) through lightweight adapters. 
In parallel, reference-guided or dual-stream designs [42]–[45] propagate features from exemplars alongside text/image prompts, thereby strengthening identity preservation and fine control during sampling.", + "bbox": [ + 73, + 642, + 491, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In fashion-related applications, DiffCloth [46] supports localized garment edits via part-specific textual prompts, enabling independent control over regions such as sleeves and collars. For logo-centric generation, AnyLogo [18] adopts a dual-state denoising strategy to retain subtle logo details; LogoSticker [19] performs token-based injection to flexibly place logo elements; and RefDiffuser [17] leverages expert-driven plugins to enhance texture fidelity and spatial alignment. Despite these advances, most methods emphasize either global", + "bbox": [ + 73, + 809, + 491, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/380582041820ddac62131e41cfc36154d411b7799852e3e145e7ca28d41e33bc.jpg", + "table_caption": [ + "TABLEI DEFINITIONS OF MAIN SYMBOLS USED IN THIS PAPER." + ], + "table_footnote": [], + "table_body": "
NotationDefinition
tTimestep
ZtLatent feature at t step
ZmOutput of mixed attention
x0Real image
xtNoisy data at t step
GGarment image
LLogo image
MMask image
CgFeature of garment image
ClFeature of logo image
CmFeature of mask image
CsFeature of silhouette image
CcFeature of color image
CtFeature of text prompt
θgGlobal appearance model
θlLocal enhancement model
εGaussian noise
αtCumulative product of noise weights
wGuidance scale
αSilhouette scale
βColor scale
", + "bbox": [ + 566, + 103, + 856, + 358 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "appearance control or localized editing in isolation. A unified framework that jointly models multiple design conditions, e.g., silhouette and layer topology together with color/pattern and local embellishments, while maintaining structural coherence across the denoising trajectory remains underexplored.", + "bbox": [ + 503, + 383, + 921, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. METHODOLOGY", + "text_level": 1, + "bbox": [ + 637, + 477, + 787, + 491 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Symbol Definition. To introduce our IMAGGarment method more clearly, we define the main symbols used throughout the paper in TABLE I.", + "bbox": [ + 503, + 496, + 921, + 541 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Task Definition. Given a garment silhouette, color palette, user-defined logo, location and an optional text description, fine-grained garment generation (FGG) aims to synthesize high-fidelity garment images with precise control over both global structure and local visual attributes. The key challenges lie in jointly modeling multi-conditional inputs, maintaining semantic and visual consistency across different design factors, and supporting controllable placement of fine-grained elements such as logos and color regions.", + "bbox": [ + 503, + 541, + 921, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Overall Framework", + "text_level": 1, + "bbox": [ + 503, + 699, + 663, + 712 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address the above challenges, we propose IMAGGarment, a conditional diffusion framework tailored for fine-grained garment generation. 
Our framework comprises two components: a global appearance model (stage I) and a local enhancement model (stage II), which explicitly disentangle and jointly control the global appearance and local details under multi-conditional guidance, enabling accurate synthesis of garment silhouette, color, and logo placement. As illustrated in Fig. 2, the global appearance model first generates a latent of coarse garment image conditioned on the textual prompt, garment silhouette, and color palette. Subsequently, the local enhancement model refines this latent representation by integrating user-defined logo and spatial constraint, producing the final high-fidelity garment image with fine-grained controllability. Specifically, the global appearance model (Section III-B)", + "bbox": [ + 501, + 717, + 921, + 946 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 75, + 29, + 416, + 41 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg", + "image_caption": [ + "Fig. 2. Visualization of the IMAGGarment inference pipeline. The global appearance model generates coarse latent from textual prompts, silhouettes, and colors. The local enhancement model then injects user-defined logos and spatial location constraints to produce the fine-grained garment." + ], + "image_footnote": [], + "bbox": [ + 76, + 68, + 490, + 150 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "leverages our proposed mixed attention module and color adapter to effectively capture global appearance features from textual descriptions, silhouettes, and colors, while mitigating entanglement among these conditions. 
The local enhancement model (Section III-C) introduces an adaptive appearance-aware module ( $A^3$ Module) that injects logo content and spatial location constraint into the latent space, achieving precise logo placement. Finally, the training and inference strategies used in IMAGGarment are summarized in Section III-D.", + "bbox": [ + 73, + 234, + 491, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "B. Stage I: Global Appearance Model", + "text_level": 1, + "bbox": [ + 75, + 398, + 339, + 412 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Motivation. Existing garment generation methods [21]–[23] typically rely on single-condition inputs (e.g., sketch or text), causing entangled features and limited controllability. To resolve this, we propose a global appearance model that explicitly disentangles silhouette, color, and text, enabling precise multi-conditional control.", + "bbox": [ + 73, + 420, + 490, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Architecture. As shown in the left of the Fig. 3, our global appearance model comprises two shared frozen VAE encoders, one frozen VAE decoder, a trainable silhouette UNet, a frozen text encoder, a trainable color adapter, and a denoising UNet with the proposed mixed attention. Specifically, we first utilize the frozen VAE encoder to project the input reference silhouette into the latent space. Subsequently, we employ a trainable silhouette UNet (structurally identical to the denoising UNet but without cross attention) to extract fine-grained silhouette features, which are then integrated into the frozen denoising UNet via our proposed mixed attention module. Meanwhile, textual features obtained from the frozen CLIP text encoder and color features extracted by the proposed color adapter are further fused into the denoising UNet through cross attention. 
After multiple denoising iterations, the model generates coarse garment images that precisely align with the reference silhouette and faithfully reflect user-specified color.", + "bbox": [ + 73, + 511, + 491, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mixed Attention. To effectively incorporate reference silhouette features into the denoising UNet without compromising the generative capability of the original UNet, we propose a mixed attention module. As shown in Fig. 3, we extend all self attention layers in the denoising UNet to the proposed mixed attention, which introduces two additional learnable projection layers to align the silhouette features $C_s$ with the latent features $Z_t$ . Formally, the mixed attention is defined as:", + "bbox": [ + 73, + 768, + 491, + 890 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nZ _ {m} = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V + \\alpha \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q \\left(K ^ {\\prime}\\right) ^ {T}}{\\sqrt {d}}\\right) V ^ {\\prime}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 83, + 898, + 490, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha$ is a hyperparameter controlling the strength of silhouette conditioning. 
The projections are computed as follows:", + "bbox": [ + 503, + 69, + 919, + 97 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nQ = Z _ {t} W _ {q}, K = Z _ {t} W _ {k}, V = Z _ {t} W _ {v}, K ^ {\\prime} = C _ {s} W _ {k} ^ {\\prime}, V ^ {\\prime} = C _ {s} W _ {v} ^ {\\prime} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 103, + 919, + 131 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $W_{q}, W_{k}, W_{v}$ are frozen parameters of linear projection layers, whereas $W_{k}^{\\prime}, W_{v}^{\\prime}$ are newly introduced learnable parameters of projection layers initialized from $W_{k}$ and $W_{v}$ , respectively. Our mixed attention facilitates the seamless integration of silhouette features into the denoising UNet, thus ensuring that generated garments maintain precise spatial alignment with the reference silhouette.", + "bbox": [ + 501, + 132, + 919, + 236 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Color Adapter. Accurate color manipulation is essential for generating garments with fine-grained visual details, significantly enhancing visual quality and realism. However, as the base model's textual prompts cannot reliably produce the intended colors, discrepancies often arise between the generated and expected colors. To address this issue, we propose a dedicated color adapter that explicitly treats color as an independent controllable factor. Specifically, given a reference color image, we extract color features $C_c$ using a frozen CLIP image encoder combined with a trainable linear layer. 
Subsequently, these color features are integrated into the denoising UNet via a cross attention mechanism, jointly with textual features $C_t$ obtained from the frozen CLIP text encoder:", + "bbox": [ + 501, + 237, + 921, + 446 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nZ _ {n e w} = \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {t} ^ {T}}{\\sqrt {d}}\\right) V _ {t} + \\beta \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {c} ^ {T}}{\\sqrt {d}}\\right) V _ {c}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 452, + 919, + 488 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $Q = Z_{t}W_{q}$ , $K_{t} = C_{t}W_{k}^{t}$ , $V_{t} = C_{t}W_{v}^{t}$ , and $K_{c} = C_{c}W_{k}^{c}$ , $V_{c} = C_{c}W_{v}^{c}$ . Here, $W_{k}^{t}, W_{v}^{t}$ denote frozen parameters of the original cross attention layers in the denoising UNet, while $W_{k}^{c}, W_{v}^{c}$ are newly introduced trainable projection layers. The hyperparameter $\\beta$ modulates the adapter's influence, ensuring precise alignment between generated colors and user specifications.", + "bbox": [ + 503, + 493, + 919, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "C. Stage II: Local Enhancement Model", + "text_level": 1, + "bbox": [ + 504, + 622, + 777, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Motivation. Existing methods [18], [19] typically neglect detailed logo integration or treat it as a separate task, causing poor spatial alignment and visual inconsistency. To address this limitation, we propose a local enhancement model equipped with an adaptive appearance-aware $(A^3)$ module, explicitly injecting user-defined logos and spatial constraints into the latent space. This design enables precise, consistent control over localized garment details, significantly enhancing visual fidelity.", + "bbox": [ + 501, + 641, + 921, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Architecture. 
As illustrated on the right of Fig. 3, the local enhancement model comprises a frozen VAE encoder and decoder, a denoising UNet, and an adaptive appearance-aware module ( $A^3$ module). The $A^3$ module fuses local conditions, such as logos and spatial constraints, by concatenating them along spatial or channel dimensions, enabling precise control over fine-grained visual elements. Given a garment, logo, and placement mask, the model adaptively adjusts the logo's size and position while preserving its visual fidelity. To reduce redundancy and focus on local detail refinement, we optimize only the self attention layers of the denoising UNet and discard", + "bbox": [ + 501, + 777, + 921, + 945 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg", + "image_caption": [ + "Fig. 3. Overview of our IMAGGarment framework. IMAGGarment is a two-stage conditional diffusion framework for fine-grained garment generation. The global appearance model first synthesizes a coarse latent representation from the input text prompt, silhouette, and color palette using a parallel UNet with mixed attention and a color adapter. The local enhancement model then refines this latent by injecting user-defined logos and location constraints through the proposed $A^3$ module, enabling precise logo placement and high-fidelity garment generation." 
+ ], + "image_footnote": [], + "bbox": [ + 76, + 69, + 509, + 291 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 70, + 919, + 290 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "all cross attention layers, as the global appearance model has already encoded the textual information.", + "bbox": [ + 73, + 376, + 491, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$A^3$ Module. To precisely integrate fine-grained logo details into designated garment regions, we introduce the adaptive appearance-aware $(A^3)$ module. By fusing image-based conditions across specific dimensions, our $A^3$ module enables precise and consistent logo integration. Specifically, given a coarse garment image $G$ , a logo image $L$ , and a binary placement mask $M$ , we first encode them using a frozen VAE encoder to obtain their corresponding latent features: $C_g \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}$ and $C_l \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}$ . The mask $M$ is resized via nearest-neighbor interpolation to match the latent resolution, resulting in $C_m \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{8}}$ . We then construct the spatially aligned conditional input as:", + "bbox": [ + 73, + 405, + 491, + 588 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nX = \\operatorname {C o n c a t} \\left(C _ {g} \\otimes C _ {m}, C _ {l}\\right), \\quad X \\in \\mathbb {R} ^ {4 \\times \\frac {H}{8} \\times \\frac {W}{4}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 120, + 590, + 488, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\otimes$ denotes element-wise multiplication and Concat indicates spatial concatenation along the width dimension. 
To align with $X$ , the resized mask $C_m$ is zero-padded to obtain $C_M \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{4}}$ . Next, we concatenate the garment and logo features to form a clean latent representation:", + "bbox": [ + 73, + 616, + 491, + 691 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nx _ {0} = \\operatorname {C o n c a t} \\left(C _ {g}, C _ {l}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 698, + 488, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and inject noise consistent with the diffusion process:", + "bbox": [ + 73, + 720, + 439, + 736 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nx _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} \\cdot x _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\cdot \\epsilon , \\quad \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 739, + 488, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $x_0$ denotes the clean latent feature obtained by concatenating garment and logo features, and $x_{t} \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{4}}$ is the corresponding noisy latent at diffusion timestep $t$ . $\\bar{\\alpha}_{t}$ is the cumulative product of the noise schedule coefficients, and $\\epsilon$ is the Gaussian noise sampled from $\\mathcal{N}(0,\\mathbf{I})$ . Finally, the full model input is obtained by concatenating the noisy latent $x_{t}$ , the padded mask $C_M$ , and the aligned conditional input $X$ along the channel dimension:", + "bbox": [ + 73, + 763, + 491, + 885 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nZ = \\operatorname {C o n c a t} \\left(x _ {t}, C _ {M}, X\\right), \\quad Z \\in \\mathbb {R} ^ {9 \\times \\frac {H}{8} \\times \\frac {W}{4}}. 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 888, + 488, + 909 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This channel-wise concatenation allows the model to jointly reason over appearance, spatial constraints, and guidance", + "bbox": [ + 73, + 914, + 491, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "signals, while maintaining compatibility with the UNet architecture for spatially aware logo synthesis.", + "bbox": [ + 503, + 376, + 919, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "D. Training and Inference", + "text_level": 1, + "bbox": [ + 504, + 431, + 687, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training. The training process is divided into two stages, each targeting a specific set of objectives with separate optimization strategies. We first train the global appearance model independently to generate a semantically coherent garment representation conditioned on silhouette and color. After verifying its performance, we freeze it and train the local enhancement model to inject fine-grained logos guided by spatial masks. This sequential training avoids gradient interference between heterogeneous objectives and ensures each module converges toward its task-specific goal. Both stages adopt mean squared error (MSE) loss to supervise the denoising process.", + "bbox": [ + 501, + 452, + 921, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage I. The global appearance model $\\theta_{g}$ is trained to synthesize garments that align with the target silhouette and color under textual guidance. To preserve the generative capacity of the pretrained denoising UNet, we freeze all parameters except those of the silhouette UNet and the cross-attention projections in the mixed attention module. 
Given silhouette features $C_s$ , text embeddings $C_t$ , and color features $C_c$ , we adopt a decoupled training strategy with $L_{\\mathrm{silhouette}}$ and $L_{\\mathrm{color}}$ losses:", + "bbox": [ + 503, + 619, + 921, + 755 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {s i l h o u e t t e}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {s}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {s}, t) \\| ^ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 761, + 919, + 787 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {c o l o r}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {c}, t} \\left\\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {c}, t) \\right\\| ^ {2},\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 784, + 880, + 804 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\epsilon$ is the added noise and $\\epsilon_{\\theta_g}$ is the prediction from the global appearance model at timestep $t$ .", + "bbox": [ + 503, + 811, + 919, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage II. The local enhancement model $\\theta_{l}$ refines the coarse latent by injecting logos at user-defined locations. To reduce overhead, we fine-tune only the self-attention layers of the logo UNet. Given logo feature $C_l$ , spatial mask $C_m$ , and garment latent $C_g$ , the training objective $L_{\\mathrm{logo}}$ is:", + "bbox": [ + 503, + 840, + 921, + 917 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\log o} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {l}, C _ {m}, C _ {g}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\| ^ {2}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 926, + 919, + 946 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 71, + 344, + 296 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg", + "image_caption": [ + "(a) Dataset Construction Pipeline" + ], + "image_footnote": [], + "bbox": [ + 354, + 71, + 607, + 297 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 71, + 895, + 297 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 101, + 316, + 344, + 419 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 316, + 637, + 419 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 316, + 895, + 419 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg", + "image_caption": [ + "(b) Samples from the GarmentBench Dataset" + ], + "image_footnote": [], + "bbox": [ + 101, + 421, + 344, + 525 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg", + 
"image_caption": [ + "Fig. 4. Overview of GarmentBench dataset construction pipeline and samples. (a) Data construction pipeline for GarmentBench. (b) Example samples with multimodal annotations: silhouette, logo, text, logo location, and color." + ], + "image_footnote": [], + "bbox": [ + 388, + 421, + 624, + 525 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 421, + 895, + 525 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\epsilon_{\\theta_l}$ denotes the prediction from the local enhancement model.", + "bbox": [ + 73, + 607, + 491, + 635 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Inference. IMAGGarment supports end-to-end inference through a two-stage pipeline operating in a shared latent space. The global appearance model first generates a latent of coarse garment image conditioned on the input text prompt, silhouette, color, and mask. This process is guided by classifier-free guidance (CFG) [47]:", + "bbox": [ + 73, + 641, + 490, + 733 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) \\tag {10} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, t\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 756, + 488, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "here, $w$ is the CFG scale and $x_{t}$ denotes the noisy latent at timestep $t$ . The coarse latent is then refined by the local enhancement model, which incorporates user-defined logos and spatial constraints through the $A^3$ module. 
We apply conditional CFG:", + "bbox": [ + 73, + 815, + 491, + 890 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\tag {11} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {m}, C _ {g}, t\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 912, + 488, + 949 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "IV. EXPERIMENTS", + "text_level": 1, + "bbox": [ + 645, + 607, + 781, + 621 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. Dataset and Metrics", + "text_level": 1, + "bbox": [ + 503, + 633, + 669, + 646 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset Construction. As shown in Fig. 4 (a), we construct and release GarmentBench, a large-scale dataset for fine-grained garment generation, containing multi-modal design conditions such as text, sketches, colors, logos, and location masks. It serves as a controllable and extensible benchmark for advancing personalized fashion generation. The construction process is as follows:", + "bbox": [ + 501, + 655, + 919, + 760 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Image Collection and Preprocessing. We collect over 189K high-quality garment images from the internet, covering a wide range of categories such as tops, bottoms, and dresses. To eliminate background distractions and focus on the garment region, we apply YOLOv8 [48] for clothing detection and perform tight cropping to obtain clean garment-centric images for further processing.", + "(2) Text, Sketch, and Color Extraction. 
For each image, we automatically generate three auxiliary conditions to simulate real-world design guidance: textual descriptions generated by the multi-modal LLM Qwen-VL-Chat [49], covering key attributes such as color, silhouette, and style; structural sketches" + ], + "bbox": [ + 501, + 762, + 921, + 944 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "obtained using Informative-Drawings [50], providing shape and layout priors; and color palettes extracted from single-color garments identified via ResNet50 [51] and clustered using K-means [52].", + "bbox": [ + 73, + 69, + 491, + 128 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(3) Logo Extraction and Location Annotation. To support logo insertion and spatial control, we further extract local design elements such as logos and prints. We use YOLOv8 to detect visually distinct regions (e.g., anime characters, animal patterns), followed by manual verification to ensure label quality. We also annotate spatial locations and generate binary masks to serve as precise spatial constraints. In total, GarmentBench contains 189,966 garment-condition pairs with rich fine-grained annotations.", + "bbox": [ + 73, + 128, + 491, + 263 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dataset Description. As shown in Fig. 4 (b), we present representative samples from the GarmentBench dataset, which include fine-grained garment images paired with multi-modal conditions such as textual descriptions, structural silhouettes, color references, logos, and spatial location masks. 
Additionally, we randomly sample images from the Fashion-ControlNet-Dataset-V31 and apply the same preprocessing pipeline as GarmentBench to construct a test set with 1,267 image-condition pairs for evaluation and comparative analysis.", + "bbox": [ + 73, + 265, + 491, + 400 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dataset Statement. GarmentBench is curated from publicly available fashion imagery under a non-commercial research intent. All personal identifiers were removed; third-party logos and brand marks are included solely to evaluate controllability and remain the property of their respective owners. We release only derived annotations and source URLs (not raw images), together with license notices and a takedown procedure; exact split indices and random seeds are provided for reproducibility.", + "bbox": [ + 73, + 401, + 491, + 521 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Metrics. We adopt four metrics to comprehensively evaluate visual quality, conditional consistency, and fine-grained controllability. Fréchet inception distance (FID) [53] measures the distribution similarity between generated and real images, reflecting overall realism. Color structure similarity (CSS) [54] assesses the consistency of color distribution, measuring color controllability. Lastly, Logo location accuracy (LLA) [55] quantifies the spatial deviation between generated and target logo positions, reflecting spatial precision. Learned perceptual image patch similarity (LPIPS) [56] reflects human-perceived visual similarity, effectively capturing structural and textural consistency. These metrics comprehensively assess quality and controllability in fine-grained garment generation.", + "bbox": [ + 73, + 522, + 491, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "B. 
Implementation Details", + "text_level": 1, + "bbox": [ + 73, + 756, + 259, + 768 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In our experiments, both the silhouette UNet and the denoising UNet are initialized with the pretrained Stable Diffusion v1.5 model2. The local enhancement model is based on the inpainting variant of Stable Diffusion v1.53, with only the self-attention layers being fine-tuned to reduce computational cost. We adopt OpenCLIP ViT-H/144 as the CLIP image encoder. All input images are resized to $512 \\times 640$ resolution. We", + "bbox": [ + 73, + 773, + 491, + 881 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1https://huggingface.co/datasets/Abrumu/Fashion_controlnet_dataset_V3", + "2https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5", + "3https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting", + "4 https://github.com/mlfoundations/open Clip" + ], + "bbox": [ + 86, + 893, + 477, + 944 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8df82528834fc4a351d35f88f4433fb6cd1c95ff062a08c153f3d1f479e21123.jpg", + "table_caption": [ + "TABLE II QUANTITATIVE COMPARISONS ON GARMENTBENCH. OURS ACHIEVES THE TOP RESULTS ACROSS ALL METRICS, WITH BEST IN BOLD." + ], + "table_footnote": [ + "* denotes re-implemented by us for a fair comparison." + ], + "table_body": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
BLIP-Diffusion* [41]101.99104.440.130.68
ControlNet-Garment* [22]41.2283.300.360.41
AnyDoor* [59]38.0868.240.650.17
IP-Adapter-Garment* [21]37.9592.950.360.43
IMAGGarment (Ours)17.6336.160.720.10
", + "bbox": [ + 509, + 114, + 926, + 205 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "use the AdamW optimizer [57] with a constant learning rate of $1 \\times 10^{-5}$ . The global appearance model and the local enhancement model are trained for 150K and 50K steps, respectively, using a batch size of 20. During inference, we adopt the DDIM sampler [58] with 50 sampling steps. Unless otherwise specified, the silhouette weight $\\alpha$ and color weight $\\beta$ in Eq.1 and Eq.3 are set to 0.6 and 1.0. The classifier-free guidance (CFG) scale $w$ in Eq.10 and Eq.11 is set to a default value of 7.0.", + "bbox": [ + 501, + 242, + 921, + 377 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "C. Baseline Comparisons", + "text_level": 1, + "bbox": [ + 504, + 397, + 683, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Due to the absence of prior work tailored to fine-grained garment generation with multi-condition control, we compare our method against four representative baselines: BLIP-Diffusion [41], AnyDoor [59], ControlNet [22], and IP-Adapter [21]. For subject-driven generation methods, BLIP-Diffusion [41] leverages a learnable Q-Former to align textual and visual embeddings in the latent space, initially designed for subject-preserving generation from text-image pairs. AnyDoor [59] combines identity and detail encoders to reconstruct personalized content, which we adapt to conditions of garment appearance and logo inputs. For plugin-based baselines, we extend ControlNet [22] and IP-Adapter [21] by duplicating and modifying their conditional branches to support multi-conditional inputs, such as silhouette, color, and logo. The adapted versions are referred to as ControlNet-Garment and IP-Adapter-Garment. Specifically, for ControlNet-Garment, we input silhouette, color, logo and mask maps into the ControlNet branch and inject them at each downsampling block, following standard practice. 
For IP-Adapter-Garment, we extend the official implementation to accept silhouette, color, logo and mask embeddings, which are concatenated and injected via cross-attention. To ensure task relevance, all methods are fine-tuned on our GarmentBench dataset with support for logo-specific conditioning. All methods are trained and evaluated under identical training protocols, input resolutions, and hardware setups. The corresponding quantitative and qualitative results are presented in Table II and Fig. 5, respectively, with detailed analysis provided below.", + "bbox": [ + 501, + 415, + 921, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative Results. As shown in Table II, IMAGGarment achieves the best performance across all four metrics on the GarmentBench dataset, demonstrating its superiority in controllable fine-grained garment generation. Compared to subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]), which rely on global features for personalized reconstruction, IMAGGarment shows substantial improvements in FID,", + "bbox": [ + 501, + 839, + 921, + 944 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 75, + 29, + 416, + 41 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg", + "image_caption": [ + "Fig. 5. Qualitative results on seen and unseen GarmentBench samples. The seen set uses original test pairs, while the unseen set involves randomly mixed conditions. IMAGGarment delivers the most consistent outputs, achieving accurate silhouette, color, and logo control across both settings." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 66, + 491, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 66, + 923, + 319 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/61e30aa29addae63474715749ba0ffbf5c696de92b586801186800fc3726cf11.jpg", + "table_caption": [ + "TABLE III QUANTITATIVE ABLATION RESULTS ON GARMENTBENCH." + ], + "table_footnote": [], + "table_body": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
B0139.33104.540.150.64
B147.4236.650.300.15
B230.1997.050.560.33
B321.2043.000.650.11
B446.16108.250.520.38
Full17.6336.160.720.10
", + "bbox": [ + 78, + 398, + 496, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "LPIPS, and CSS. These gains highlight the effectiveness of our mixed attention and color adapter modules in achieving coherent multi-condition fusion, resulting in more realistic, perceptually consistent, and color-faithful outputs. In contrast to plugin-based approaches (ControlNet-Garment [22], IP-Adapter-Garment [21]) that simply stack independent conditional branches, IMAGGarment yields significantly higher LLA, reflecting more precise logo placement. Our proposed $\\mathrm{A}^3$ module drives these improvements, which adaptively injects spatial priors and logo features into the latent space for accurate local control. Overall, these results indicate that global-only conditioning or naive plugin stacking is insufficient for fine-grained control. By contrast, IMAGGarment provides an effective solution for multi-conditional garment synthesis, enabling precise coordination of global structure and local detail.", + "bbox": [ + 73, + 534, + 491, + 775 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative Results. Fig. 5 presents qualitative comparisons on both seen and unseen garments. Notably, the seen test set refers to the designated test split of our GarmentBench dataset. In the absence of other suitable public datasets, we assess generalization using an unseen-composition test split constructed by randomly recombining input conditions (e.g., silhouette, color, logo) into combinations that never appear during training, thereby simulating real-world fashion-design scenarios. On seen garments, subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]) reconstruct global appearance but lack spatial control. 
BLIP-Diffusion retains", + "bbox": [ + 73, + 777, + 491, + 945 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "logo identity yet fails at precise placement due to text-only conditioning, while AnyDoor introduces logo distortions and stylistic artifacts. Plugin-based baselines (ControlNet-Garment [22], IP-Adapter-Garment [21]) treat conditions independently, resulting in poor coordination. ControlNet-Garment suffers from cross-condition interference, and IP-Adapter-Garment often misplaces logos despite preserving structure. In contrast, IMAGGarment achieves accurate control over silhouette, color, and logo placement. On unseen garments, all baselines degrade notably. Subject-driven methods fail to generalize to novel layouts, AnyDoor distorts appearance, and BLIP-Diffusion struggles with logo positioning. Plugin-based methods also falter: ControlNet-Garment produces mismatched outputs, and IP-Adapter-Garment cannot interpret unseen spatial semantics. IMAGGarment remains robust, maintaining alignment across all conditions. This generalization stems from our $A^3$ module, which effectively integrates spatial and visual cues in the latent space. These results validate the controllability and flexibility of our method in both seen and unseen settings.", + "bbox": [ + 501, + 367, + 921, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "D. Ablation Study", + "text_level": 1, + "bbox": [ + 504, + 696, + 633, + 710 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To validate the effectiveness of each component in our framework, we design a series of ablation variants within the IMAGGarment architecture: B0 uses the vanilla Stable Diffusion v1.5 without any of our proposed modules, serving as the baseline. B1 removes the local enhancement model (Stage II), evaluating the impact of omitting logo injection and spatial control. 
B2 removes the global appearance model (Stage I), assessing the model's performance without structured silhouette and color conditioning. B3 removes the color adapter from the global appearance model, isolating the role of color guidance in generation. B4 replaces our mixed attention with vanilla self-attention in the denoising UNet, testing the importance of spatial fusion with silhouette features. Full represents the complete IMAGGarment framework with all proposed modules integrated.", + "bbox": [ + 501, + 717, + 921, + 945 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg", + "image_caption": [ + "Fig. 6. Qualitative ablation results on GarmentBench. The \"Full\" configuration achieves the best results, highlighting the importance of each component." + ], + "image_footnote": [], + "bbox": [ + 89, + 64, + 901, + 508 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ablation of Architecture Design. Table III presents the quantitative impact of each component in our proposed IMAGGarment. In B1, which removes the local enhancement stage, the model struggles to place logos precisely, leading to degraded LLA. Although the overall garment structure is preserved, the lack of spatial control prevents accurate logo integration. In B2, without the global appearance stage, the model fails to maintain silhouette and color consistency, resulting in significantly worse FID, LPIPS, and CSS. This demonstrates that local injection alone is insufficient to handle global garment layouts. B3 disables the color adapter, causing notable drops in CSS, highlighting its role in faithful color transfer and control. 
B4 replaces our mixed attention with standard self-attention, which weakens the fusion of silhouette guidance and causes drops in both LPIPS and FID, indicating reduced realism and structural coherence. The full IMAGGarment achieves the best performance across all metrics, validating the complementary design of each module's effectiveness in handling multi-condition garment generation. Further, Fig. 6 shows qualitative comparisons. B1 fails to align logos spatially, while B2 produces distorted garments lacking color and silhouette guidance. Despite maintaining logo placement, B3 leads to color mismatch, and B4 generates less coherent garment layouts. In contrast, the full model successfully synthesizes garments with accurate silhouettes, precise logo placement, and faithful color reproduction, demonstrating the benefits", + "bbox": [ + 73, + 537, + 493, + 931 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "of our dual-stage design, color adapter, and mixed attention fusion. Overall, The \"Full\" configuration achieves the best results, highlighting the importance of each component.", + "bbox": [ + 503, + 537, + 921, + 585 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "E. More Results and Analysis", + "text_level": 1, + "bbox": [ + 504, + 607, + 712, + 622 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Controllability Analysis. We assess controllability by varying a single condition at a time (silhouette, color palette, or logo position) while keeping the others fixed. As shown in Fig. 7, each three column block visualizes the model's response to one condition. Changing the silhouette (left block) yields garments that match the target shapes, indicating that the mixed attention module preserves structural alignment. Varying the color palette (middle block) produces the intended color distributions, validating the color adapter for color faithful generation. 
Adjusting the logo position (right block) achieves precise spatial relocation, showing that the $A^3$ module effectively injects spatial priors for local control. Overall, IMAGGarment provides fine-grained and decoupled control of garment attributes suitable for practical design workflows. Non-varied attributes remain stable across manipulations, reflecting minimal cross-condition interference and consistent editing behavior. Sequential composition of edits across attributes produces similar outcomes regardless of edit order, which suggests low inter-attribute coupling. Control fidelity also holds under moderate changes of viewpoint and background, supporting robustness in real design scenarios.", + "bbox": [ + 501, + 627, + 923, + 946 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 30, + 919, + 39 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 98, + 66, + 344, + 444 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg", + "image_caption": [ + "Fig. 7. Controllability visualization. Each block varies one input condition while keeping others fixed. Left: Silhouette changes lead to consistent structural adaptation. Middle: Color palette variation results in accurate color transfer. Right: Logo mask adjustment yields precise spatial placement." 
+ ], + "image_footnote": [], + "bbox": [ + 369, + 66, + 620, + 448 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 66, + 895, + 448 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg", + "image_caption": [ + "Fig. 8. Hyperparameter analysis of silhouette weight $\\alpha$ and color weight $\\beta$ ." + ], + "image_footnote": [], + "bbox": [ + 96, + 474, + 897, + 648 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Hyperparameter Analysis. We study the effect of two key hyperparameters in Eq.1 and Eq.3: the silhouette guidance weight $\\alpha$ and the color conditioning weight $\\beta$ . From Fig. 8, varying $\\alpha$ directly impacts the model's ability to follow the reference silhouette. When $\\alpha$ is too low, the generated structure becomes blurry or deviates from the target shape; when too high, it may suppress color and text guidance. We empirically set $\\alpha = 0.6$ for balanced structural alignment. Similarly, the color weight $\\beta$ controls the influence of the color palette. As $\\beta$ increases, color consistency improves steadily, with $\\beta = 1.0$ yielding the best visual fidelity. Joint sweeps over $(\\alpha, \\beta)$ indicate a broad stability region around $\\alpha \\in [0.5, 0.7]$ and $\\beta \\in [0.8, 1.1]$ , showing robustness to moderate mistuning. Interaction effects are mild: very large $\\alpha$ slightly narrows the effective range of $\\beta$ , while very large $\\beta$ can oversaturate colors and reduce shading nuance. We therefore adopt $\\alpha = 0.6$ and $\\beta = 1.0$ throughout all experiments.", + "bbox": [ + 73, + 678, + 491, + 935 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "V. 
CONCLUSION", + "text_level": 1, + "bbox": [ + 653, + 679, + 772, + 691 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We propose IMAGGarment, a unified conditional diffusion framework for fine-grained garment generation with precise control over silhouette, color, and logo placement. By introducing mixed attention, color adapter, and the $A^3$ module, our framework explicitly disentangles global structure (silhouette and color) from local attributes (logo content and spatial placement), enabling accurate spatial control and high-quality synthesis. To support this task, we construct GarmentBench, a large-scale benchmark with over 180K samples annotated with multi-level design conditions. Comprehensive experiments on both seen and unseen garments demonstrate that IMAGGarment achieves state-of-the-art results in structure fidelity, color consistency, and logo controllability. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment.", + "bbox": [ + 503, + 717, + 921, + 944 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 235, + 69, + 331, + 82 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Aijia Zhang, Weiqiang Jia, Qiang Zou, Yixiong Feng, Xiaoxiang Wei, and Ye Zhang. Diffusion-cad: Controllable diffusion model for generating computer-aided design models. IEEE Transactions on Visualization and Computer Graphics, 2025.", + "[2] Xiongzheng Li, Jing Huang, Jinsong Zhang, Xiaokun Sun, Haibiao Xuan, Yu-Kun Lai, Yingdi Xie, Jingyu Yang, and Kun Li. Learning to infer inner-body under clothing from monocular video. 
IEEE Transactions on Visualization and Computer Graphics, 29(12):5083-5096, 2022.", + "[3] Nannan Zhang, Zhenyu Xie, Zhengwentai Sun, Hairui Zhu, Zirong Jin, Nan Xiang, Xiaoguang Han, and Song Wu. Viton-gun: Person-to-person virtual try-on via garment unwrapping. IEEE Transactions on Visualization and Computer Graphics, 2025.", + "[4] Wen-Yang Zhou, Lu Yuan, Shu-Yu Chen, Lin Gao, and Shi-Min Hu. Lcnerf: Local controllable face generation in neural radiance field. IEEE Transactions on Visualization and Computer Graphics, 30(8):5437-5448, 2023.", + "[5] Pinaki Nath Chowdhury, Tuanfeng Wang, Duygu Ceylan, Yi-Zhe Song, and Yulia Gryaditskaya. Garment ideation: Iterative view-aware sketch-based garment modeling. In 2022 International Conference on 3D Vision (3DV), pages 22-31. IEEE, 2022.", + "[6] Yu Jin and Kyungho Lee. Human-ai co-creation in fashion design ideation and sketching: an empirical study. In Proceedings of IEEE/CVF Computer Vision and Pattern Recognition Conference (CVPR), CVFAD Workshop, Seattle, USA, 2024.", + "[7] Funda Durupynar and Ugur Gudukbay. A virtual garment design and simulation system. In 2007 11th International Conference Information Visualization (IV'07), pages 862-870. IEEE, 2007.", + "[8] Saikrupa PA et al. Smart stitch: A mobile app for personalized garment customization and stitching guidance. In 2025 International Conference on Data Science, Agents & Artificial Intelligence (ICDSAAI), pages 1-5. IEEE, 2025.", + "[9] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. 2023. arXiv:2307.01952.", + "[10] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. 2022. ArXiv:2210.02747.", + "[11] Wanchao Su, Hui Ye, Shu-Yu Chen, Lin Gao, and Hongbo Fu. Drawingstyles: Portrait image generation and editing with spatially conditioned stylegan. 
IEEE transactions on visualization and computer graphics, 29(10):4074-4088, 2022.", + "[12] Changjian Chen, Fei Lv, Yalong Guan, Pengcheng Wang, Shengjie Yu, Yifan Zhang, and Zhuo Tang. Human-guided image generation for expanding small-scale training image datasets. IEEE Transactions on Visualization and Computer Graphics, 2025.", + "[13] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. In Proceedings of the ACM SIGGRAPH Conference, pages 1–11, 2023.", + "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1125-1134, 2017.", + "[15] Subhadeep Koley, Ayan Kumar Bhunia, Deeptanshu Sekhri, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. It's all about your sketch: Democratising sketch control in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7204-7214, 2024.", + "[16] Subhadeep Koley, Ayan Kumar Bhunia, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. Text-to-image diffusion models are great sketch-photo matchmakers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16826-16837, 2024.", + "[17] Taewook Kim, Ze Wang, Zhengyuan Yang, Jiang Wang, Lijuan Wang, Zicheng Liu, and Qiang Qiu. Conditional text-to-image generation with reference guidance. 2024. ArXiv:2411.16713.", + "[18] Jinghao Zhang, Wen Qian, Hao Luo, Fan Wang, and Feng Zhao. Anylogo: Symbiotic subject-driven diffusion system with gemini status. 2024. ArXiv:2409.17740.", + "[19] Mingkang Zhu, Xi Chen, Zhongdao Wang, Hengshuang Zhao, and Jiaya Jia. Logosticker: Inserting logos into diffusion models for customized generation. In Proceedings of European Conference on Computer Vision, pages 363-378, 2024." 
+ ], + "bbox": [ + 76, + 95, + 491, + 944 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[20] Mingzhe Yu, Yunshan Ma, Lei Wu, Changshuo Wang, Xue Li, and Lei Meng. Fashiondpo: Fine-tune fashion outfit generation model using direct preference optimization. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 212-222, 2025.", + "[21] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. 2023. ArXiv:2308.06721.", + "[22] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023.", + "[23] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022.", + "[24] Shu-Yu Chen, Wanchao Su, Lin Gao, Shihong Xia, and Hongbo Fu. Deepfacedrawing: deep generation of face images from sketches. ACM Transactions on Graphics, 39(4), August 2020.", + "[25] Shu-Yu Chen, Feng-Lin Liu, Yu-Kun Lai, Paul L. Rosin, Chunpeng Li, Hongbo Fu, and Lin Gao. Deepfaceediting: deep face generation and editing with disentangled geometry and appearance control. ACM Transactions on Graphics, 40(4), July 2021.", + "[26] Xian Wu, Chen Wang, Hongbo Fu, Ariel Shamir, Song-Hai Zhang, and Shi-Min Hu. Deepportraitdrawing: Generating human body images from freehand sketches, 2022. ArXiv:2205.02070.", + "[27] Arnab Ghosh, Richard Zhang, Puneet K Dokania, Oliver Wang, Alexei A Efros, Philip HS Torr, and Eli Shechtman. Interactive sketch & fill: Multiclass sketch-to-image translation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1171-1180, 2019.", + "[28] Wengling Chen and James Hays. Sketchygan: Towards diverse and realistic sketch to image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 9416-9425, 2018.", + "[29] Zeyu Li, Cheng Deng, Erkun Yang, and Dacheng Tao. Staged sketch-to-image synthesis via semi-supervised generative adversarial networks. IEEE Transactions on Multimedia, 23:2694-2705, 2020.", + "[30] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE Signal Processing Magazine, 35(1):53-65, 2018.", + "[31] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. 2014. ArXiv:1411.1784.", + "[32] Yifang Men, Yiming Mao, Yuning Jiang, Wei-Ying Ma, and Zhouhui Lian. Controllable person image synthesis with attribute-decomposed gan. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 5084-5093, 2020.", + "[33] Yifan Liu, Zengchang Qin, Zhenbo Luo, and Hua Wang. Auto-painter: Cartoon image generation from sketch by using conditional generative adversarial networks. 2017. ArXiv:1705.01908.", + "[34] Yuanzheng Ci, Xinzhu Ma, Zhihui Wang, Haojie Li, and Zhongxuan Luo. User-guided deep anime line art colorization with conditional adversarial networks. In Proceedings of the 26th ACM International Conference on Multimedia, page 1536-1544, 2018.", + "[35] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017.", + "[36] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. 
In Proceedings of the Conference on Neural Information Processing Systems, page 405-415, 2017.", + "[37] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3408-3416, 2018.", + "[38] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proceedings of the Conference on Neural Information Processing Systems, 2020.", + "[39] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. 2020. ArXiv:2011.13456.", + "[40] Junyao Gao, Yanan Sun, Fei Shen, Xin Jiang, Zhening Xing, Kai Chen, and Cairong Zhao. Faceshot: Bring any character into life. 2025. ArXiv:2503.00740.", + "[41] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. Proceedings of the Conference on Neural Information Processing Systems, 36:30146-30166, 2023." + ], + "bbox": [ + 506, + 70, + 919, + 944 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] Fei Shen, Xin Jiang, Xin He, Hu Ye, Cong Wang, Xiaoyu Du, Zechao Li, and Jinhui Tang. Imagdressing-v1: Customizable virtual dressing. 2024. ArXiv:2407.12705.", + "[43] Ente Lin, Xujie Zhang, Fuwei Zhao, Yuxuan Luo, Xin Dong, Long Zeng, and Xiaodan Liang. Dreamfit: Garment-centric human generation via a lightweight anything-dressing encoder. 2024. ArXiv:2412.17644.", + "[44] Weifeng Chen, Tao Gu, Yuhao Xu, and Arlene Chen. 
Magic clothing: Controllable garment-driven image synthesis. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6939-6948, 2024.", + "[45] Yuhao Xu, Tao Gu, Weifeng Chen, and Chengcai Chen. Ootdiffusion: Outfitting fusion based latent diffusion for controllable virtual try-on. 2024. ArXiv:2403.01779.", + "[46] Xujie Zhang, Binbin Yang, Michael C Kampffmeyer, Wenqing Zhang, Shiyue Zhang, Guansong Lu, Liang Lin, Hang Xu, and Xiaodan Liang. Diffcloth: Diffusion based garment synthesis and manipulation via structural cross-modal semantic alignment. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23154-23163, 2023.", + "[47] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. 2022. ArXiv:2207.12598.", + "[48] Muhammad Hussain. Yolov5, yolov8 and yolov10: The go-to detectors for real-time vision, 2024. ArXiv:2407.02988.", + "[49] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond, 2023. ArXiv:2308.12966.", + "[50] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7915-7925, 2022.", + "[51] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. ArXiv:1512.03385.", + "[52] J. MacQueen. Some methods for classification and analysis of multivariate observations. In Proceedings of the 5th Berkeley Symposium on Mathematical Statistics and Probability, pages 281-297, 1967.", + "[53] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. 
Proceedings of the Conference on Neural Information Processing Systems, 30, 2017.", + "[54] Kai Zeng, Zhou Wang, Anmin Zhang, Zhaohui Wang, and Wenjun Zhang. A color structural similarity index for image quality assessment. In Proceedings of the IEEE International Conference on Image Processing (ICIP), pages 660-664, 2014.", + "[55] Masato Fujitake. Rl-logo: Deep reinforcement learning localization for logo recognition. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2830-2834. IEEE, 2024.", + "[56] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018.", + "[57] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. ArXiv:1711.05101.", + "[58] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. ArXiv:2010.02502.", + "[59] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024." + ], + "bbox": [ + 76, + 71, + 491, + 739 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021", + "bbox": [ + 76, + 29, + 416, + 41 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_model.json b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c9d4a4224dec62c8120244bfe40dd94c12974a3a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_model.json @@ -0,0 +1,2677 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.071, + 0.905, + 0.141 + ], + "angle": 0, + "content": "IMAGGarment: Fine-Grained Garment Generation for Controllable Fashion Design" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.148, + 0.849, + 0.165 + ], + "angle": 0, + "content": "Fei Shen, Jian Yu, Cong Wang, Xin Jiang, Xiaoyu Du, and Jinhui Tang, Senior Member, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.221, + 0.493, + 0.526 + ], + "angle": 0, + "content": "Abstract—This paper presents IMAGGarment, a fine-grained garment generation (FGG) framework that enables high-fidelity garment synthesis with precise control over silhouette, color, and logo placement. Unlike existing methods that are limited to single-condition inputs, IMAGGarment addresses the challenges of multi-conditional controllability in personalized fashion design and digital apparel applications. 
Specifically, IMAGGarment employs a two-stage training strategy to separately model global appearance and local details, while enabling unified and controllable generation through end-to-end inference. In the first stage, we propose a global appearance model that jointly encodes silhouette and color using a mixed attention module and a color adapter. In the second stage, we present a local enhancement model with an adaptive appearance-aware module to inject user-defined logos and spatial constraints, enabling accurate placement and visual consistency. To support this task, we release GarmentBench, a large-scale dataset comprising over 180K garment samples paired with multi-level design conditions, including sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that our method outperforms existing baselines, achieving superior structural stability, color fidelity, and local controllability performance. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.53, + 0.492, + 0.57 + ], + "angle": 0, + "content": "Index Terms—Fine-Grained Garment Generation, Multi-Conditional Generation, Fashion Design Applications, Garment-Bench Dataset." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.593, + 0.352, + 0.607 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.613, + 0.491, + 0.703 + ], + "angle": 0, + "content": "Fine-Grained garment generation (FGG) aims to synthesize high-quality garments with precise control over garment silhouette, color scheme, logo content, and spatial placement. As personalized fashion and the digital apparel market grow rapidly, fine-grained controllability [1]–[4] is increasingly crucial for applications in fashion design and e-commerce." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.704, + 0.492, + 0.751 + ], + "angle": 0, + "content": "In traditional garment ideation [5], [6] and visualization [7], [8], designers analyze line drawings to establish silhouette and construction, then select color palettes and materials, and" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.762, + 0.49, + 0.808 + ], + "angle": 0, + "content": "Fei Shen is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the NExT++ Research Centre, National University of Singapore, Singapore, e-mail: shenfei29@nus.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.808, + 0.492, + 0.853 + ], + "angle": 0, + "content": "Jian Yu, Xin Jiang, and Xiaoyu Du are with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China. e-mail: jianyu@njust.edu.cn; xinjiang@njust.edu.cn; duxy@njust.edu.cn." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.853, + 0.492, + 0.887 + ], + "angle": 0, + "content": "Cong Wang is with the State Key Laboratory for Novel Software Technology and the School of Computer Science, Nanjing University, Nanjing, 210023, China. e-mail: cw@smail.nju.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.887, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Jinhui Tang is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the College of Information Science and Technology and Artificial Intelligence, Nanjing Forestry University, Nanjing 210037, China, e-mail: jinhuitang@njust.edu.cn. (Corresponding author: Jinhui Tang.)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.22, + 0.923, + 0.401 + ], + "angle": 0, + "content": "finally arrange brand elements such as logos and trims. This manual workflow has two persistent drawbacks. 
First, it is time consuming: to match the specification, edits must be applied object by object and view by view; in a seasonal collection, even identical panels within the same board are recolored or relabeled one at a time, which does not scale. Second, it is error prone and inconsistent: small deviations in hue, shading, or logo placement arise across artists and rounds of revision, yielding mismatches across styles, sizes, and camera viewpoints. As project scope grows, these issues inflate turnaround time and complicate quality control and version management." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.402, + 0.923, + 0.763 + ], + "angle": 0, + "content": "Recently, image synthesis [9]–[12] has made notable progress in tasks such as sketch-to-image generation [13]–[16] and logo insertion [17]–[19] (as illustrated in Fig. 1 (a)), demonstrating basic capabilities in structural and content-level control. However, these tasks [13], [17], [20] provide only coarse guidance and rely on single-condition inputs (e.g., sketch or color), lacking the fine-grained controllability needed to model the nuanced interactions between global structure and local details in garment design. Although sequential or modular combinations may offer partial solutions, they [21]–[23] fail to explicitly disentangle and jointly model global attributes (e.g., silhouette, color) and local appearance details (e.g., logo content and spatial placement). Without unified control mechanisms, these approaches [21]–[23] often suffer from condition entanglement, conflicting objectives, and visual inconsistencies, ultimately falling short of the high standards required in real-world fashion design. In contrast, practical fashion design [5], [6] requires joint control over multiple interdependent factors: designers determine global attributes such as silhouette and color, followed by fine-tuning of local elements like logos and their placement. 
To support this process, a unified generation task that clearly separates and coordinates global and local attributes is essential for controllable and high-fidelity synthesis." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.765, + 0.923, + 0.945 + ], + "angle": 0, + "content": "To address these limitations, we propose a new task: fine-grained garment generation (FGG), as illustrated in Fig. 1 (b). FGG is formulated as a unified multi-conditional garment synthesis task, taking a textual prompt, garment silhouette, color palette, and spatially constrained logos as joint inputs. It aims to generate garments that faithfully reflect high-level structural intent and fine-grained local styling cues. FGG is specifically designed to mirror real-world fashion workflows, where designers must coordinate diverse input modalities to express creative intent. Unlike conventional approaches that process each condition independently or sequentially, FGG emphasizes joint modeling and hierarchical reasoning across" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.061, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.13176v2 [cs.CV] 8 Sep 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.057, + 0.293, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.15, + 0.208, + 0.427, + 0.223 + ], + "angle": 0, + "content": "(a) Sketch-to-image and logo insertion task" + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.057, + 0.497, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.225, + 0.498, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.056, + 0.882, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.573, + 0.209, + 0.833, + 0.223 + ], + "angle": 0, + "content": "(b) Fine-grained garment generation task" + }, + { + "type": "image", + "bbox": [ + 0.53, + 0.225, + 0.899, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.337, + 0.446, + 0.686, + 0.461 + ], + "angle": 0, + "content": "(c) Generalization capability in real-world applications" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.473, + 0.924, + 0.51 + ], + "angle": 0, + "content": "Fig. 1. Comparison of (a) existing sketch-to-image and logo insertion tasks with (b) our proposed fine-grained garment generation (FGG) task, which enables precise and controllable synthesis of garment structure, color, logo, and spatial placement. Unlike previous tasks that rely on a single input condition, FGG is tailored for real-world fashion design workflows by integrating multiple conditional controls." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.535, + 0.492, + 0.732 + ], + "angle": 0, + "content": "input types. 
It goes beyond simple task combinations by enforcing consistent integration of global and local attributes within a unified generation framework, enabling nuanced control over the overall structure and detailed appearance. Specifically, FGG task introduces three key challenges: (1) maintaining visual and semantic consistency across heterogeneous input conditions, (2) resolving conflicts between global structures and localized visual elements, and (3) generalizing to unseen condition combinations without retraining (see Fig. 1(c)). FGG thus marks a fundamental shift from single-condition or loosely coupled pipelines toward a unified, design-intent-driven generation paradigm that better reflects the complexity of real-world garment design." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.734, + 0.493, + 0.946 + ], + "angle": 0, + "content": "To this end, we propose IMAGGarment, a two-stage training and end-to-end inference framework tailored for fine-grained garment generation. Unlike prior methods that rely on single-condition inputs or simple feature fusion, our framework is explicitly designed to achieve fine-grained controllability under multiple, interdependent constraints. In the first stage, we propose a global appearance model with a mixed attention module and a color adapter to jointly encode garment silhouette and color palette, improving overall appearance fidelity and mitigating condition entanglement. In the second stage, we present a local enhancement model equipped with an adaptive appearance-aware module to inject user-defined logos and their spatial constraints, enabling precise logo placement while preserving global consistency. 
To further promote research in" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.535, + 0.922, + 0.655 + ], + "angle": 0, + "content": "this direction, we release GarmentBench, a large-scale dataset comprising over 180k garment samples annotated with rich multi-level design conditions, including silhouette sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that IMAGGarment significantly outperforms existing baselines in terms of structural stability and local controllability. To summarize, the main contributions are listed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.657, + 0.922, + 0.717 + ], + "angle": 0, + "content": "- We propose IMAGGarment, a controllable garment generation framework that enables precise control over garment structure, color, and logo placement, addressing the challenges of FGG." + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.718, + 0.922, + 0.778 + ], + "angle": 0, + "content": "- We design a mixed attention module, color adapter, and adaptive appearance-aware module to disentangle global structure from local attributes, achieving fine-grained visual control and accurate spatial control." + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.779, + 0.922, + 0.838 + ], + "angle": 0, + "content": "- We release GarmentBench, a large-scale dataset with diverse garments and rich multi-conditional annotations, serving as a valuable benchmark for controllable garment generation research." + }, + { + "type": "list", + "bbox": [ + 0.521, + 0.657, + 0.922, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.84, + 0.922, + 0.946 + ], + "angle": 0, + "content": "The remainder of this paper is organized as follows. Section II surveys prior work on garment generation, encompassing GAN-based techniques and diffusion-based controllable generation. 
Section III describes the proposed IMAGGarment methodology, comprising a global appearance model with mixed attention and a color adapter, a local enhancement model with the A3 module, and the associated training and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.132 + ], + "angle": 0, + "content": "inference strategies. Section IV presents the experimental protocol and results, including the GarmentBench dataset and evaluation metrics, implementation details, and results and analysis. Section V concludes the paper." + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.145, + 0.358, + 0.159 + ], + "angle": 0, + "content": "II. RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.165, + 0.245, + 0.179 + ], + "angle": 0, + "content": "A. GAN-Based Methods" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.183, + 0.493, + 0.426 + ], + "angle": 0, + "content": "Early approaches [24]–[29] to garment generation predominantly build on generative adversarial networks (GANs) [30]–[32], with a major line devoted to sketch-to-image translation [33] that learns spatial mappings from structural cues. Representative systems such as DeepFaceDrawing [24] and DeepFaceEditing [25] decompose sketches into semantic components and progressively assemble photorealistic results, while DeepPortraitDrawing [26] extends this paradigm to full-body synthesis via local-to-global pipelines. Interactive frameworks [27] further introduce gating mechanisms for user-guided editing, and DALColor [34] combines WGAN-GP [35] with line-art colorization for refined appearance control. 
Beyond sketches, related GAN-based efforts explore pose- or part-guided generation [36], [37], leveraging learned warping or deformable alignment to better propagate structural constraints from sources to targets." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.425, + 0.495, + 0.609 + ], + "angle": 0, + "content": "However, these methods [24]–[27] are largely restricted to single-condition settings (e.g., sketches or poses alone), making it difficult to support real-world fashion scenarios that require joint control over multiple factors such as silhouette, garment layers, color/pattern, and local embellishments. Moreover, adversarial training is prone to instability and visual artifacts [32], [36], [37], and the reliance on paired or carefully aligned supervision limits robustness to occlusion, diverse body shapes, and open-world catalogs. As a result, while GAN-based pipelines can produce plausible textures under constrained conditions, they struggle to achieve reliable, fine-grained, and multi-conditional controllability at scale." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.625, + 0.271, + 0.64 + ], + "angle": 0, + "content": "B. Diffusion-Based Methods" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.643, + 0.493, + 0.809 + ], + "angle": 0, + "content": "Diffusion models [38]–[40] have achieved strong progress in conditional image generation owing to their iterative denoising process and flexible conditioning interfaces. To improve controllability with minimal modification to large backbones, plugin-based approaches such as IP-Adapter [21], ControlNet [22], and BLIP-Diffusion [41] inject external conditions (e.g., reference images, structural maps, or language cues) through lightweight adapters. In parallel, reference-guided or dual-stream designs [42]–[45] propagate features from exemplars alongside text/image prompts, thereby strengthening identity preservation and fine control during sampling." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.81, + 0.493, + 0.947 + ], + "angle": 0, + "content": "In fashion-related applications, DiffCloth [46] supports localized garment edits via part-specific textual prompts, enabling independent control over regions such as sleeves and collars. For logo-centric generation, AnyLogo [18] adopts a dual-state denoising strategy to retain subtle logo details; LogoSticker [19] performs token-based injection to flexibly place logo elements; and RefDiffuser [17] leverages expert-driven plugins to enhance texture fidelity and spatial alignment. Despite these advances, most methods emphasize either global" + }, + { + "type": "table_caption", + "bbox": [ + 0.562, + 0.072, + 0.866, + 0.095 + ], + "angle": 0, + "content": "TABLEI DEFINITIONS OF MAIN SYMBOLS USED IN THIS PAPER." + }, + { + "type": "table", + "bbox": [ + 0.568, + 0.104, + 0.857, + 0.359 + ], + "angle": 0, + "content": "
NotationDefinition
tTimestep
ZtLatent feature at t step
ZmOutput of mixed attention
x0Real image
xtNoisy data at t step
GGarment image
LLogo image
MMask image
CgFeature of garment image
ClFeature of logo image
CmFeature of mask image
CsFeature of silhouette image
CcFeature of color image
CtFeature of text prompt
θgGlobal appearance model
θlLocal enhancement model
εGaussian noise
αtCumulative product of noise weights
wGuidance scale
αSilhouette scale
βColor scale
" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.384, + 0.922, + 0.461 + ], + "angle": 0, + "content": "appearance control or localized editing in isolation. A unified framework that jointly models multiple design conditions, e.g., silhouette and layer topology together with color/pattern and local embellishments, while maintaining structural coherence across the denoising trajectory remains underexplored." + }, + { + "type": "title", + "bbox": [ + 0.638, + 0.478, + 0.789, + 0.492 + ], + "angle": 0, + "content": "III. METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.497, + 0.922, + 0.542 + ], + "angle": 0, + "content": "Symbol Definition. To introduce our IMAGGarment method more clearly, we define the main symbols used throughout the paper in TABLE I." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.542, + 0.922, + 0.68 + ], + "angle": 0, + "content": "Task Definition. Given a garment silhouette, color palette, user-defined logo, location and an optional text description, fine-grained garment generation (FGG) aims to synthesize high-fidelity garment images with precise control over both global structure and local visual attributes. The key challenges lie in jointly modeling multi-conditional inputs, maintaining semantic and visual consistency across different design factors, and supporting controllable placement of fine-grained elements such as logos and color regions." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.7, + 0.664, + 0.713 + ], + "angle": 0, + "content": "A. Overall Framework" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.718, + 0.922, + 0.947 + ], + "angle": 0, + "content": "To address the above challenges, we propose IMAGGarment, a conditional diffusion framework tailored for fine-grained garment generation. 
Our framework comprises two components: a global appearance model (stage I) and a local enhancement model (stage II), which explicitly disentangle and jointly control the global appearance and local details under multi-conditional guidance, enabling accurate synthesis of garment silhouette, color, and logo placement. As illustrated in Fig. 2, the global appearance model first generates a latent of coarse garment image conditioned on the textual prompt, garment silhouette, and color palette. Subsequently, the local enhancement model refines this latent representation by integrating user-defined logo and spatial constraint, producing the final high-fidelity garment image with fine-grained controllability. Specifically, the global appearance model (Section III-B)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "4" + }, + { + "type": "image", + "bbox": [ + 0.077, + 0.069, + 0.491, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.16, + 0.493, + 0.208 + ], + "angle": 0, + "content": "Fig. 2. Visualization of the IMAGGarment inference pipeline. The global appearance model generates coarse latent from textual prompts, silhouettes, and colors. The local enhancement model then injects user-defined logos and spatial location constraints to produce the fine-grained garment." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.235, + 0.492, + 0.371 + ], + "angle": 0, + "content": "leverages our proposed mixed attention module and color adapter to effectively capture global appearance features from textual descriptions, silhouettes, and colors, while mitigating entanglement among these conditions. 
The local enhancement model (Section III-C) introduces an adaptive appearance-aware module (\\(A^3\\) Module) that injects logo content and spatial location constraint into the latent space, achieving precise logo placement. Finally, the training and inference strategies used in IMAGGarment are summarized in Section III-D." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.399, + 0.34, + 0.414 + ], + "angle": 0, + "content": "B. Stage I: Global Appearance Model" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.421, + 0.491, + 0.51 + ], + "angle": 0, + "content": "Motivation. Existing garment generation methods [21]–[23] typically rely on single-condition inputs (e.g., sketch or text), causing entangled features and limited controllability. To resolve this, we propose a global appearance model that explicitly disentangles silhouette, color, and text, enabling precise multi-conditional control." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.512, + 0.492, + 0.768 + ], + "angle": 0, + "content": "Architecture. As shown in the left of the Fig. 3, our global appearance model comprises two shared frozen VAE encoders, one frozen VAE decoder, a trainable silhouette UNet, a frozen text encoder, a trainable color adapter, and a denoising UNet with the proposed mixed attention. Specifically, we first utilize the frozen VAE encoder to project the input reference silhouette into the latent space. Subsequently, we employ a trainable silhouette UNet (structurally identical to the denoising UNet but without cross attention) to extract fine-grained silhouette features, which are then integrated into the frozen denoising UNet via our proposed mixed attention module. Meanwhile, textual features obtained from the frozen CLIP text encoder and color features extracted by the proposed color adapter are further fused into the denoising UNet through cross attention. 
After multiple denoising iterations, the model generates coarse garment images that precisely align with the reference silhouette and faithfully reflect user-specified color." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.77, + 0.492, + 0.891 + ], + "angle": 0, + "content": "Mixed Attention. To effectively incorporate reference silhouette features into the denoising UNet without compromising the generative capability of the original UNet, we propose a mixed attention module. As shown in Fig. 3, we extend all self attention layers in the denoising UNet to the proposed mixed attention, which introduces two additional learnable projection layers to align the silhouette features \\( C_s \\) with the latent features \\( Z_t \\). Formally, the mixed attention is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.084, + 0.899, + 0.491, + 0.947 + ], + "angle": 0, + "content": "\\[\nZ _ {m} = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V + \\alpha \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q \\left(K ^ {\\prime}\\right) ^ {T}}{\\sqrt {d}}\\right) V ^ {\\prime}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.07, + 0.921, + 0.098 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a hyperparameter controlling the strength of silhouette conditioning. 
The projections are computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.505, + 0.104, + 0.921, + 0.132 + ], + "angle": 0, + "content": "\\[\nQ = Z _ {t} W _ {q}, K = Z _ {t} W _ {k}, V = Z _ {t} W _ {v}, K ^ {\\prime} = C _ {s} W _ {k} ^ {\\prime}, V ^ {\\prime} = C _ {s} W _ {v} ^ {\\prime} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.133, + 0.921, + 0.237 + ], + "angle": 0, + "content": "where \\( W_{q}, W_{k}, W_{v} \\) are frozen parameters of linear projection layers, whereas \\( W_{k}^{\\prime}, W_{v}^{\\prime} \\) are newly introduced learnable parameters of projection layers initialized from \\( W_{k} \\) and \\( W_{v} \\), respectively. Our mixed attention facilitates the seamless integration of silhouette features into the denoising UNet, thus ensuring that generated garments maintain precise spatial alignment with the reference silhouette." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.238, + 0.922, + 0.448 + ], + "angle": 0, + "content": "Color Adapter. Accurate color manipulation is essential for generating garments with fine-grained visual details, significantly enhancing visual quality and realism. However, as the base model's textual prompts cannot reliably produce the intended colors, discrepancies often arise between the generated and expected colors. To address this issue, we propose a dedicated color adapter that explicitly treats color as an independent controllable factor. Specifically, given a reference color image, we extract color features \\( C_c \\) using a frozen CLIP image encoder combined with a trainable linear layer. 
Subsequently, these color features are integrated into the denoising UNet via a cross attention mechanism, jointly with textual features \\( C_t \\) obtained from the frozen CLIP text encoder:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.453, + 0.921, + 0.489 + ], + "angle": 0, + "content": "\\[\nZ _ {n e w} = \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {t} ^ {T}}{\\sqrt {d}}\\right) V _ {t} + \\beta \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {c} ^ {T}}{\\sqrt {d}}\\right) V _ {c}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.494, + 0.921, + 0.6 + ], + "angle": 0, + "content": "where \\( Q = Z_{t}W_{q} \\), \\( K_{t} = C_{t}W_{k}^{t} \\), \\( V_{t} = C_{t}W_{v}^{t} \\), and \\( K_{c} = C_{c}W_{k}^{c} \\), \\( V_{c} = C_{c}W_{v}^{c} \\). Here, \\( W_{k}^{t}, W_{v}^{t} \\) denote frozen parameters of the original cross attention layers in the denoising UNet, while \\( W_{k}^{c}, W_{v}^{c} \\) are newly introduced trainable projection layers. The hyperparameter \\( \\beta \\) modulates the adapter's influence, ensuring precise alignment between generated colors and user specifications." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.623, + 0.779, + 0.637 + ], + "angle": 0, + "content": "C. Stage II: Local Enhancement Model" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.642, + 0.922, + 0.779 + ], + "angle": 0, + "content": "Motivation. Existing methods [18], [19] typically neglect detailed logo integration or treat it as a separate task, causing poor spatial alignment and visual inconsistency. To address this limitation, we propose a local enhancement model equipped with an adaptive appearance-aware \\((A^3)\\) module, explicitly injecting user-defined logos and spatial constraints into the latent space. This design enables precise, consistent control over localized garment details, significantly enhancing visual fidelity." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.779, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Architecture. As illustrated on the right of Fig. 3, the local enhancement model comprises a frozen VAE encoder and decoder, a denoising UNet, and an adaptive appearance-aware module (\\(A^3\\) module). The \\(A^3\\) module fuses local conditions, such as logos and spatial constraints, by concatenating them along spatial or channel dimensions, enabling precise control over fine-grained visual elements. Given a garment, logo, and placement mask, the model adaptively adjusts the logo's size and position while preserving its visual fidelity. To reduce redundancy and focus on local detail refinement, we optimize only the self attention layers of the denoising UNet and discard" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.07, + 0.511, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.071, + 0.92, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.304, + 0.924, + 0.355 + ], + "angle": 0, + "content": "Fig. 3. Overview of our IMAGGarment framework. IMAGGarment is a two-stage conditional diffusion framework for fine-grained garment generation. The global appearance model first synthesizes a coarse latent representation from the input text prompt, silhouette, and color palette using a parallel UNet with mixed attention and a color adapter. The local enhancement model then refines this latent by injecting user-defined logos and location constraints through the proposed \\(A^3\\) module, enabling precise logo placement and high-fidelity garment generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.377, + 0.492, + 0.406 + ], + "angle": 0, + "content": "all cross attention layers, as the global appearance model has already encoded the textual information." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.406, + 0.493, + 0.589 + ], + "angle": 0, + "content": "\\(A^3\\) Module. To precisely integrate fine-grained logo details into designated garment regions, we introduce the adaptive appearance-aware \\((A^3)\\) module. By fusing image-based conditions across specific dimensions, our \\(A^3\\) module enables precise and consistent logo integration. Specifically, given a coarse garment image \\(G\\), a logo image \\(L\\), and a binary placement mask \\(M\\), we first encode them using a frozen VAE encoder to obtain their corresponding latent features: \\(C_g \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}\\) and \\(C_l \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}\\). The mask \\(M\\) is resized via nearest-neighbor interpolation to match the latent resolution, resulting in \\(C_m \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{8}}\\). We then construct the spatially aligned conditional input as:" + }, + { + "type": "equation", + "bbox": [ + 0.121, + 0.591, + 0.49, + 0.613 + ], + "angle": 0, + "content": "\\[\nX = \\operatorname {C o n c a t} \\left(C _ {g} \\otimes C _ {m}, C _ {l}\\right), \\quad X \\in \\mathbb {R} ^ {4 \\times \\frac {H}{8} \\times \\frac {W}{4}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.617, + 0.493, + 0.693 + ], + "angle": 0, + "content": "where \\(\\otimes\\) denotes element-wise multiplication and Concat indicates spatial concatenation along the width dimension. To align with \\(X\\), the resized mask \\(C_m\\) is zero-padded to obtain \\(C_M \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{4}}\\). 
Next, we concatenate the garment and logo features to form a clean latent representation:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.699, + 0.49, + 0.716 + ], + "angle": 0, + "content": "\\[\nx _ {0} = \\operatorname {C o n c a t} \\left(C _ {g}, C _ {l}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.722, + 0.441, + 0.737 + ], + "angle": 0, + "content": "and inject noise consistent with the diffusion process:" + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.741, + 0.49, + 0.76 + ], + "angle": 0, + "content": "\\[\nx _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} \\cdot x _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\cdot \\epsilon , \\quad \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I}), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.765, + 0.493, + 0.886 + ], + "angle": 0, + "content": "where \\( x_0 \\) denotes the clean latent feature obtained by concatenating garment and logo features, and \\( x_{t} \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{4}} \\) is the corresponding noisy latent at diffusion timestep \\( t \\). \\( \\bar{\\alpha}_{t} \\) is the cumulative product of the noise schedule coefficients, and \\( \\epsilon \\) is the Gaussian noise sampled from \\( \\mathcal{N}(0,\\mathbf{I}) \\). Finally, the full model input is obtained by concatenating the noisy latent \\( x_{t} \\), the padded mask \\( C_M \\), and the aligned conditional input \\( X \\) along the channel dimension:" + }, + { + "type": "equation", + "bbox": [ + 0.129, + 0.889, + 0.49, + 0.91 + ], + "angle": 0, + "content": "\\[\nZ = \\operatorname {C o n c a t} \\left(x _ {t}, C _ {M}, X\\right), \\quad Z \\in \\mathbb {R} ^ {9 \\times \\frac {H}{8} \\times \\frac {W}{4}}. 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.947 + ], + "angle": 0, + "content": "This channel-wise concatenation allows the model to jointly reason over appearance, spatial constraints, and guidance" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.377, + 0.921, + 0.409 + ], + "angle": 0, + "content": "signals, while maintaining compatibility with the UNet architecture for spatially aware logo synthesis." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.433, + 0.688, + 0.448 + ], + "angle": 0, + "content": "D. Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.453, + 0.922, + 0.62 + ], + "angle": 0, + "content": "Training. The training process is divided into two stages, each targeting a specific set of objectives with separate optimization strategies. We first train the global appearance model independently to generate a semantically coherent garment representation conditioned on silhouette and color. After verifying its performance, we freeze it and train the local enhancement model to inject fine-grained logos guided by spatial masks. This sequential training avoids gradient interference between heterogeneous objectives and ensures each module converges toward its task-specific goal. Both stages adopt mean squared error (MSE) loss to supervise the denoising process." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.62, + 0.922, + 0.756 + ], + "angle": 0, + "content": "Stage I. The global appearance model \\(\\theta_{g}\\) is trained to synthesize garments that align with the target silhouette and color under textual guidance. To preserve the generative capacity of the pretrained denoising UNet, we freeze all parameters except those of the silhouette UNet and the cross-attention projections in the mixed attention module. 
Given silhouette features \\(C_s\\), text embeddings \\(C_t\\), and color features \\(C_c\\), we adopt a decoupled training strategy with \\(L_{\\mathrm{silhouette}}\\) and \\(L_{\\mathrm{color}}\\) losses:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.762, + 0.92, + 0.788 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {s i l h o u e t t e}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {s}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {s}, t) \\| ^ {2}, \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.785, + 0.882, + 0.805 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {c o l o r}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {c}, t} \\left\\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {c}, t) \\right\\| ^ {2},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.812, + 0.921, + 0.842 + ], + "angle": 0, + "content": "where \\(\\epsilon\\) is the added noise and \\(\\epsilon_{\\theta_g}\\) is the prediction from the global appearance model at timestep \\(t\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.842, + 0.922, + 0.919 + ], + "angle": 0, + "content": "Stage II. The local enhancement model \\(\\theta_{l}\\) refines the coarse latent by injecting logos at user-defined locations. To reduce overhead, we fine-tune only the self-attention layers of the logo UNet. 
Given logo feature \\(C_l\\), spatial mask \\(C_m\\), and garment latent \\(C_g\\), the training objective \\(L_{\\mathrm{logo}}\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.928, + 0.921, + 0.947 + ], + "angle": 0, + "content": "\\[\nL _ {\\log o} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {l}, C _ {m}, C _ {g}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\| ^ {2}, \\tag {9}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "6" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.072, + 0.345, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.356, + 0.072, + 0.609, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.072, + 0.896, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.361, + 0.3, + 0.586, + 0.315 + ], + "angle": 0, + "content": "(a) Dataset Construction Pipeline" + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.317, + 0.346, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.317, + 0.638, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.317, + 0.897, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.422, + 0.346, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.422, + 0.625, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.422, + 0.896, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.529, + 0.629, + 0.545 + ], + 
"angle": 0, + "content": "(b) Samples from the GarmentBench Dataset" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.558, + 0.921, + 0.582 + ], + "angle": 0, + "content": "Fig. 4. Overview of GarmentBench dataset construction pipeline and samples. (a) Data construction pipeline for GarmentBench. (b) Example samples with multimodal annotations: silhouette, logo, text, logo location, and color." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.608, + 0.492, + 0.636 + ], + "angle": 0, + "content": "where \\(\\epsilon_{\\theta_l}\\) denotes the prediction from the local enhancement model." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.642, + 0.491, + 0.734 + ], + "angle": 0, + "content": "Inference. IMAGGarment supports end-to-end inference through a two-stage pipeline operating in a shared latent space. The global appearance model first generates a latent of coarse garment image conditioned on the input text prompt, silhouette, color, and mask. This process is guided by classifier-free guidance (CFG) [47]:" + }, + { + "type": "equation", + "bbox": [ + 0.106, + 0.757, + 0.49, + 0.794 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) \\tag {10} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, t\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.816, + 0.492, + 0.891 + ], + "angle": 0, + "content": "here, \\( w \\) is the CFG scale and \\( x_{t} \\) denotes the noisy latent at timestep \\( t \\). The coarse latent is then refined by the local enhancement model, which incorporates user-defined logos and spatial constraints through the \\( A^3 \\) module. 
We apply conditional CFG:" + }, + { + "type": "equation", + "bbox": [ + 0.086, + 0.913, + 0.49, + 0.95 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\tag {11} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {m}, C _ {g}, t\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.646, + 0.608, + 0.782, + 0.622 + ], + "angle": 0, + "content": "IV. EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.634, + 0.671, + 0.647 + ], + "angle": 0, + "content": "A. Dataset and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.656, + 0.921, + 0.761 + ], + "angle": 0, + "content": "Dataset Construction. As shown in Fig. 4 (a), we construct and release GarmentBench, a large-scale dataset for fine-grained garment generation, containing multi-modal design conditions such as text, sketches, colors, logos, and location masks. It serves as a controllable and extensible benchmark for advancing personalized fashion generation. The construction process is as follows:" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.763, + 0.922, + 0.869 + ], + "angle": 0, + "content": "(1) Image Collection and Preprocessing. We collect over 189K high-quality garment images from the internet, covering a wide range of categories such as tops, bottoms, and dresses. To eliminate background distractions and focus on the garment region, we apply YOLOv8 [48] for clothing detection and perform tight cropping to obtain clean garment-centric images for further processing." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.87, + 0.922, + 0.945 + ], + "angle": 0, + "content": "(2) Text, Sketch, and Color Extraction. 
For each image, we automatically generate three auxiliary conditions to simulate real-world design guidance: textual descriptions generated by the multi-modal LLM Qwen-VL-Chat [49], covering key attributes such as color, silhouette, and style; structural sketches" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.763, + 0.922, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.129 + ], + "angle": 0, + "content": "obtained using Informative-Drawings [50], providing shape and layout priors; and color palettes extracted from single-color garments identified via ResNet50 [51] and clustered using K-means [52]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.13, + 0.492, + 0.265 + ], + "angle": 0, + "content": "(3) Logo Extraction and Location Annotation. To support logo insertion and spatial control, we further extract local design elements such as logos and prints. We use YOLOv8 to detect visually distinct regions (e.g., anime characters, animal patterns), followed by manual verification to ensure label quality. We also annotate spatial locations and generate binary masks to serve as precise spatial constraints. In total, GarmentBench contains 189,966 garment-condition pairs with rich fine-grained annotations." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.266, + 0.493, + 0.401 + ], + "angle": 0, + "content": "Dataset Description. As shown in Fig. 4 (b), we present representative samples from the GarmentBench dataset, which include fine-grained garment images paired with multi-modal conditions such as textual descriptions, structural silhouettes, color references, logos, and spatial location masks. 
Additionally, we randomly sample images from the Fashion-ControlNet-Dataset-V31 and apply the same preprocessing pipeline as GarmentBench to construct a test set with 1,267 image-condition pairs for evaluation and comparative analysis." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.402, + 0.493, + 0.522 + ], + "angle": 0, + "content": "Dataset Statement. GarmentBench is curated from publicly available fashion imagery under a non-commercial research intent. All personal identifiers were removed; third-party logos and brand marks are included solely to evaluate controllability and remain the property of their respective owners. We release only derived annotations and source URLs (not raw images), together with license notices and a takedown procedure; exact split indices and random seeds are provided for reproducibility." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.523, + 0.493, + 0.735 + ], + "angle": 0, + "content": "Evaluation Metrics. We adopt four metrics to comprehensively evaluate visual quality, conditional consistency, and fine-grained controllability. Fréchet inception distance (FID) [53] measures the distribution similarity between generated and real images, reflecting overall realism. Color structure similarity (CSS) [54] assesses the consistency of color distribution, measuring color controllability. Lastly, Logo location accuracy (LLA) [55] quantifies the spatial deviation between generated and target logo positions, reflecting spatial precision. Learned perceptual image patch similarity (LPIPS) [56] reflects human-perceived visual similarity, effectively capturing structural and textural consistency. These metrics comprehensively assess quality and controllability in fine-grained garment generation." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.757, + 0.26, + 0.77 + ], + "angle": 0, + "content": "B. 
Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.775, + 0.493, + 0.882 + ], + "angle": 0, + "content": "In our experiments, both the silhouette UNet and the denoising UNet are initialized with the pretrained Stable Diffusion v1.5 model2. The local enhancement model is based on the inpainting variant of Stable Diffusion v1.53, with only the self-attention layers being fine-tuned to reduce computational cost. We adopt OpenCLIP ViT-H/144 as the CLIP image encoder. All input images are resized to \\(512 \\times 640\\) resolution. We" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.894, + 0.478, + 0.907 + ], + "angle": 0, + "content": "1https://huggingface.co/datasets/Abrumu/Fashion_controlnet_dataset_V3" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.907, + 0.449, + 0.919 + ], + "angle": 0, + "content": "2https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.919, + 0.476, + 0.933 + ], + "angle": 0, + "content": "3https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.933, + 0.33, + 0.945 + ], + "angle": 0, + "content": "4 https://github.com/mlfoundations/open Clip" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.894, + 0.478, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.517, + 0.072, + 0.91, + 0.107 + ], + "angle": 0, + "content": "TABLE II QUANTITATIVE COMPARISONS ON GARMENTBENCH. OURS ACHIEVES THE TOP RESULTS ACROSS ALL METRICS, WITH BEST IN BOLD." + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.116, + 0.927, + 0.207 + ], + "angle": 0, + "content": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
BLIP-Diffusion* [41]101.99104.440.130.68
ControlNet-Garment* [22]41.2283.300.360.41
AnyDoor* [59]38.0868.240.650.17
IP-Adapter-Garment* [21]37.9592.950.360.43
IMAGGarment (Ours)17.6336.160.720.10
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.515, + 0.206, + 0.813, + 0.217 + ], + "angle": 0, + "content": "* denotes re-implemented by us for a fair comparison." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.243, + 0.922, + 0.378 + ], + "angle": 0, + "content": "use the AdamW optimizer [57] with a constant learning rate of \\(1 \\times 10^{-5}\\). The global appearance model and the local enhancement model are trained for 150K and 50K steps, respectively, using a batch size of 20. During inference, we adopt the DDIM sampler [58] with 50 sampling steps. Unless otherwise specified, the silhouette weight \\(\\alpha\\) and color weight \\(\\beta\\) in Eq.1 and Eq.3 are set to 0.6 and 1.0. The classifier-free guidance (CFG) scale \\(w\\) in Eq.10 and Eq.11 is set to a default value of 7.0." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.398, + 0.684, + 0.413 + ], + "angle": 0, + "content": "C. Baseline Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.416, + 0.922, + 0.84 + ], + "angle": 0, + "content": "Due to the absence of prior work tailored to fine-grained garment generation with multi-condition control, we compare our method against four representative baselines: BLIP-Diffusion [41], AnyDoor [59], ControlNet [22], and IP-Adapter [21]. For subject-driven generation methods, BLIP-Diffusion [41] leverages a learnable Q-Former to align textual and visual embeddings in the latent space, initially designed for subject-preserving generation from text-image pairs. AnyDoor [59] combines identity and detail encoders to reconstruct personalized content, which we adapt to conditions of garment appearance and logo inputs. For plugin-based baselines, we extend ControlNet [22] and IP-Adapter [21] by duplicating and modifying their conditional branches to support multi-conditional inputs, such as silhouette, color, and logo. The adapted versions are referred to as ControlNet-Garment and IP-Adapter-Garment. 
Specifically, for ControlNet-Garment, we input silhouette, color, logo and mask maps into the ControlNet branch and inject them at each downsampling block, following standard practice. For IP-Adapter-Garment, we extend the official implementation to accept silhouette, color, logo and mask embeddings, which are concatenated and injected via cross-attention. To ensure task relevance, all methods are fine-tuned on our GarmentBench dataset with support for logo-specific conditioning. All methods are trained and evaluated under identical training protocols, input resolutions, and hardware setups. The corresponding quantitative and qualitative results are presented in Table II and Fig. 5, respectively, with detailed analysis provided below." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.84, + 0.922, + 0.945 + ], + "angle": 0, + "content": "Quantitative Results. As shown in Table II, IMAGGarment achieves the best performance across all four metrics on the GarmentBench dataset, demonstrating its superiority in controllable fine-grained garment generation. Compared to subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]), which rely on global features for personalized reconstruction, IMAGGarment shows substantial improvements in FID," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "8" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.068, + 0.493, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.068, + 0.924, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.328, + 0.921, + 0.355 + ], + "angle": 0, + "content": "Fig. 5. Qualitative results on seen and unseen GarmentBench samples. 
The seen set uses original test pairs, while the unseen set involves randomly mixed conditions. IMAGGarment delivers the most consistent outputs, achieving accurate silhouette, color, and logo control across both settings." + }, + { + "type": "table_caption", + "bbox": [ + 0.12, + 0.37, + 0.446, + 0.394 + ], + "angle": 0, + "content": "TABLE III QUANTITATIVE ABLATION RESULTS ON GARMENTBENCH." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.4, + 0.497, + 0.504 + ], + "angle": 0, + "content": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
B0139.33104.540.150.64
B147.4236.650.300.15
B230.1997.050.560.33
B321.2043.000.650.11
B446.16108.250.520.38
Full17.6336.160.720.10
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.535, + 0.492, + 0.776 + ], + "angle": 0, + "content": "LPIPS, and CSS. These gains highlight the effectiveness of our mixed attention and color adapter modules in achieving coherent multi-condition fusion, resulting in more realistic, perceptually consistent, and color-faithful outputs. In contrast to plugin-based approaches (ControlNet-Garment [22], IP-Adapter-Garment [21]) that simply stack independent conditional branches, IMAGGarment yields significantly higher LLA, reflecting more precise logo placement. Our proposed \\(\\mathrm{A}^3\\) module drives these improvements, which adaptively injects spatial priors and logo features into the latent space for accurate local control. Overall, these results indicate that global-only conditioning or naive plugin stacking is insufficient for fine-grained control. By contrast, IMAGGarment provides an effective solution for multi-conditional garment synthesis, enabling precise coordination of global structure and local detail." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.779, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Qualitative Results. Fig. 5 presents qualitative comparisons on both seen and unseen garments. Notably, the seen test set refers to the designated test split of our GarmentBench dataset. In the absence of other suitable public datasets, we assess generalization using an unseen-composition test split constructed by randomly recombining input conditions (e.g., silhouette, color, logo) into combinations that never appear during training, thereby simulating real-world fashion-design scenarios. On seen garments, subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]) reconstruct global appearance but lack spatial control. 
BLIP-Diffusion retains" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.368, + 0.923, + 0.671 + ], + "angle": 0, + "content": "logo identity yet fails at precise placement due to text-only conditioning, while AnyDoor introduces logo distortions and stylistic artifacts. Plugin-based baselines (ControlNet-Garment [22], IP-Adapter-Garment [21]) treat conditions independently, resulting in poor coordination. ControlNet-Garment suffers from cross-condition interference, and IP-Adapter-Garment often misplaces logos despite preserving structure. In contrast, IMAGGarment achieves accurate control over silhouette, color, and logo placement. On unseen garments, all baselines degrade notably. Subject-driven methods fail to generalize to novel layouts, AnyDoor distorts appearance, and BLIP-Diffusion struggles with logo positioning. Plugin-based methods also falter: ControlNet-Garment produces mismatched outputs, and IP-Adapter-Garment cannot interpret unseen spatial semantics. IMAGGarment remains robust, maintaining alignment across all conditions. This generalization stems from our \\(A^3\\) module, which effectively integrates spatial and visual cues in the latent space. These results validate the controllability and flexibility of our method in both seen and unseen settings." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.697, + 0.634, + 0.712 + ], + "angle": 0, + "content": "D. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.718, + 0.923, + 0.946 + ], + "angle": 0, + "content": "To validate the effectiveness of each component in our framework, we design a series of ablation variants within the IMAGGarment architecture: B0 uses the vanilla Stable Diffusion v1.5 without any of our proposed modules, serving as the baseline. B1 removes the local enhancement model (Stage II), evaluating the impact of omitting logo injection and spatial control. 
B2 removes the global appearance model (Stage I), assessing the model's performance without structured silhouette and color conditioning. B3 removes the color adapter from the global appearance model, isolating the role of color guidance in generation. B4 replaces our mixed attention with vanilla self-attention in the denoising UNet, testing the importance of spatial fusion with silhouette features. Full represents the complete IMAGGarment framework with all proposed modules integrated." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.04 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.065, + 0.903, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.511, + 0.921, + 0.525 + ], + "angle": 0, + "content": "Fig. 6. Qualitative ablation results on GarmentBench. The \"Full\" configuration achieves the best results, highlighting the importance of each component." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.539, + 0.494, + 0.932 + ], + "angle": 0, + "content": "Ablation of Architecture Design. Table III presents the quantitative impact of each component in our proposed IMAGGarment. In B1, which removes the local enhancement stage, the model struggles to place logos precisely, leading to degraded LLA. Although the overall garment structure is preserved, the lack of spatial control prevents accurate logo integration. In B2, without the global appearance stage, the model fails to maintain silhouette and color consistency, resulting in significantly worse FID, LPIPS, and CSS. This demonstrates that local injection alone is insufficient to handle global garment layouts. 
B3 disables the color adapter, causing notable drops in CSS, highlighting its role in faithful color transfer and control. B4 replaces our mixed attention with standard self-attention, which weakens the fusion of silhouette guidance and causes drops in both LPIPS and FID, indicating reduced realism and structural coherence. The full IMAGGarment achieves the best performance across all metrics, validating the complementary design of each module's effectiveness in handling multi-condition garment generation. Further, Fig. 6 shows qualitative comparisons. B1 fails to align logos spatially, while B2 produces distorted garments lacking color and silhouette guidance. Despite maintaining logo placement, B3 leads to color mismatch, and B4 generates less coherent garment layouts. In contrast, the full model successfully synthesizes garments with accurate silhouettes, precise logo placement, and faithful color reproduction, demonstrating the benefits" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.539, + 0.922, + 0.586 + ], + "angle": 0, + "content": "of our dual-stage design, color adapter, and mixed attention fusion. Overall, The \"Full\" configuration achieves the best results, highlighting the importance of each component." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.608, + 0.714, + 0.623 + ], + "angle": 0, + "content": "E. More Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.628, + 0.924, + 0.947 + ], + "angle": 0, + "content": "Controllability Analysis. We assess controllability by varying a single condition at a time (silhouette, color palette, or logo position) while keeping the others fixed. As shown in Fig. 7, each three column block visualizes the model's response to one condition. Changing the silhouette (left block) yields garments that match the target shapes, indicating that the mixed attention module preserves structural alignment. 
Varying the color palette (middle block) produces the intended color distributions, validating the color adapter for color faithful generation. Adjusting the logo position (right block) achieves precise spatial relocation, showing that the \\(A^3\\) module effectively injects spatial priors for local control. Overall, IMAGGarment provides fine-grained and decoupled control of garment attributes suitable for practical design workflows. Non-varied attributes remain stable across manipulations, reflecting minimal cross-condition interference and consistent editing behavior. Sequential composition of edits across attributes produces similar outcomes regardless of edit order, which suggests low inter-attribute coupling. Control fidelity also holds under moderate changes of viewpoint and background, supporting robustness in real design scenarios." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "10" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.068, + 0.345, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.068, + 0.622, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.068, + 0.897, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.448, + 0.921, + 0.471 + ], + "angle": 0, + "content": "Fig. 7. Controllability visualization. Each block varies one input condition while keeping others fixed. Left: Silhouette changes lead to consistent structural adaptation. Middle: Color palette variation results in accurate color transfer. Right: Logo mask adjustment yields precise spatial placement." 
+ }, + { + "type": "image", + "bbox": [ + 0.097, + 0.476, + 0.898, + 0.649 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.651, + 0.515, + 0.665 + ], + "angle": 0, + "content": "Fig. 8. Hyperparameter analysis of silhouette weight \\(\\alpha\\) and color weight \\(\\beta\\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.679, + 0.492, + 0.936 + ], + "angle": 0, + "content": "Hyperparameter Analysis. We study the effect of two key hyperparameters in Eq.1 and Eq.3: the silhouette guidance weight \\(\\alpha\\) and the color conditioning weight \\(\\beta\\). From Fig. 8, varying \\(\\alpha\\) directly impacts the model's ability to follow the reference silhouette. When \\(\\alpha\\) is too low, the generated structure becomes blurry or deviates from the target shape; when too high, it may suppress color and text guidance. We empirically set \\(\\alpha = 0.6\\) for balanced structural alignment. Similarly, the color weight \\(\\beta\\) controls the influence of the color palette. As \\(\\beta\\) increases, color consistency improves steadily, with \\(\\beta = 1.0\\) yielding the best visual fidelity. Joint sweeps over \\((\\alpha, \\beta)\\) indicate a broad stability region around \\(\\alpha \\in [0.5, 0.7]\\) and \\(\\beta \\in [0.8, 1.1]\\), showing robustness to moderate mistuning. Interaction effects are mild: very large \\(\\alpha\\) slightly narrows the effective range of \\(\\beta\\), while very large \\(\\beta\\) can oversaturate colors and reduce shading nuance. We therefore adopt \\(\\alpha = 0.6\\) and \\(\\beta = 1.0\\) throughout all experiments." + }, + { + "type": "title", + "bbox": [ + 0.654, + 0.68, + 0.774, + 0.693 + ], + "angle": 0, + "content": "V. 
CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.718, + 0.922, + 0.945 + ], + "angle": 0, + "content": "We propose IMAGGarment, a unified conditional diffusion framework for fine-grained garment generation with precise control over silhouette, color, and logo placement. By introducing mixed attention, color adapter, and the \\(A^3\\) module, our framework explicitly disentangles global structure (silhouette and color) from local attributes (logo content and spatial placement), enabling accurate spatial control and high-quality synthesis. To support this task, we construct GarmentBench, a large-scale benchmark with over 180K samples annotated with multi-level design conditions. Comprehensive experiments on both seen and unseen garments demonstrate that IMAGGarment achieves state-of-the-art results in structure fidelity, color consistency, and logo controllability. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "11" + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.07, + 0.332, + 0.083 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.096, + 0.49, + 0.141 + ], + "angle": 0, + "content": "[1] Aijia Zhang, Weiqiang Jia, Qiang Zou, Yixiong Feng, Xiaoxiang Wei, and Ye Zhang. Diffusion-cad: Controllable diffusion model for generating computer-aided design models. IEEE Transactions on Visualization and Computer Graphics, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.143, + 0.492, + 0.198 + ], + "angle": 0, + "content": "[2] Xiongzheng Li, Jing Huang, Jinsong Zhang, Xiaokun Sun, Haibiao Xuan, Yu-Kun Lai, Yingdi Xie, Jingyu Yang, and Kun Li. 
Learning to infer inner-body under clothing from monocular video. IEEE Transactions on Visualization and Computer Graphics, 29(12):5083-5096, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.2, + 0.492, + 0.245 + ], + "angle": 0, + "content": "[3] Nannan Zhang, Zhenyu Xie, Zhengwentai Sun, Hairui Zhu, Zirong Jin, Nan Xiang, Xiaoguang Han, and Song Wu. Viton-gun: Person-to-person virtual try-on via garment unwrapping. IEEE Transactions on Visualization and Computer Graphics, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.246, + 0.492, + 0.29 + ], + "angle": 0, + "content": "[4] Wen-Yang Zhou, Lu Yuan, Shu-Yu Chen, Lin Gao, and Shi-Min Hu. Lcnerf: Local controllable face generation in neural radiance field. IEEE Transactions on Visualization and Computer Graphics, 30(8):5437-5448, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.291, + 0.492, + 0.337 + ], + "angle": 0, + "content": "[5] Pinaki Nath Chowdhury, Tuanfeng Wang, Duygu Ceylan, Yi-Zhe Song, and Yulia Gryaditskaya. Garment ideation: Iterative view-aware sketch-based garment modeling. In 2022 International Conference on 3D Vision (3DV), pages 22-31. IEEE, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.338, + 0.492, + 0.382 + ], + "angle": 0, + "content": "[6] Yu Jin and Kyungho Lee. Human-ai co-creation in fashion design ideation and sketching: an empirical study. In Proceedings of IEEE/CVF Computer Vision and Pattern Recognition Conference (CVPR), CVFAD Workshop, Seattle, USA, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.383, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[7] Funda Durupynar and Ugur Gudukbay. A virtual garment design and simulation system. In 2007 11th International Conference Information Visualization (IV'07), pages 862-870. IEEE, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.418, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[8] Saikrupa PA et al. 
Smart stitch: A mobile app for personalized garment customization and stitching guidance. In 2025 International Conference on Data Science, Agents & Artificial Intelligence (ICDSAAI), pages 1-5. IEEE, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.463, + 0.492, + 0.508 + ], + "angle": 0, + "content": "[9] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. 2023. arXiv:2307.01952." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.509, + 0.492, + 0.543 + ], + "angle": 0, + "content": "[10] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. 2022. ArXiv:2210.02747." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.544, + 0.492, + 0.589 + ], + "angle": 0, + "content": "[11] Wanchao Su, Hui Ye, Shu-Yu Chen, Lin Gao, and Hongbo Fu. Drawingstyles: Portrait image generation and editing with spatially conditioned stylegan. IEEE transactions on visualization and computer graphics, 29(10):4074-4088, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.59, + 0.492, + 0.635 + ], + "angle": 0, + "content": "[12] Changjian Chen, Fei Lv, Yalong Guan, Pengcheng Wang, Shengjie Yu, Yifan Zhang, and Zhuo Tang. Human-guided image generation for expanding small-scale training image datasets. IEEE Transactions on Visualization and Computer Graphics, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.636, + 0.492, + 0.669 + ], + "angle": 0, + "content": "[13] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. In Proceedings of the ACM SIGGRAPH Conference, pages 1–11, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.67, + 0.492, + 0.715 + ], + "angle": 0, + "content": "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. 
Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1125-1134, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.716, + 0.492, + 0.772 + ], + "angle": 0, + "content": "[15] Subhadeep Koley, Ayan Kumar Bhunia, Deeptanshu Sekhri, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. It's all about your sketch: Democratising sketch control in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7204-7214, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.773, + 0.492, + 0.829 + ], + "angle": 0, + "content": "[16] Subhadeep Koley, Ayan Kumar Bhunia, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. Text-to-image diffusion models are great sketch-photo matchmakers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16826-16837, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.83, + 0.492, + 0.864 + ], + "angle": 0, + "content": "[17] Taewook Kim, Ze Wang, Zhengyuan Yang, Jiang Wang, Lijuan Wang, Zicheng Liu, and Qiang Qiu. Conditional text-to-image generation with reference guidance. 2024. ArXiv:2411.16713." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.865, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[18] Jinghao Zhang, Wen Qian, Hao Luo, Fan Wang, and Feng Zhao. Anylogo: Symbiotic subject-driven diffusion system with gemini status. 2024. ArXiv:2409.17740." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.899, + 0.492, + 0.945 + ], + "angle": 0, + "content": "[19] Mingkang Zhu, Xi Chen, Zhongdao Wang, Hengshuang Zhao, and Jiaya Jia. Logosticker: Inserting logos into diffusion models for customized generation. In Proceedings of European Conference on Computer Vision, pages 363-378, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.096, + 0.492, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.129 + ], + "angle": 0, + "content": "[20] Mingzhe Yu, Yunshan Ma, Lei Wu, Changshuo Wang, Xue Li, and Lei Meng. Fashiondpo: Fine-tune fashion outfit generation model using direct preference optimization. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 212-222, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.129, + 0.921, + 0.162 + ], + "angle": 0, + "content": "[21] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. 2023. ArXiv:2308.06721." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.163, + 0.921, + 0.207 + ], + "angle": 0, + "content": "[22] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.208, + 0.921, + 0.254 + ], + "angle": 0, + "content": "[23] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.254, + 0.921, + 0.287 + ], + "angle": 0, + "content": "[24] Shu-Yu Chen, Wanchao Su, Lin Gao, Shihong Xia, and Hongbo Fu. Deepfacedrawing: deep generation of face images from sketches. ACM Transactions on Graphics, 39(4), August 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.288, + 0.921, + 0.332 + ], + "angle": 0, + "content": "[25] Shu-Yu Chen, Feng-Lin Liu, Yu-Kun Lai, Paul L. Rosin, Chunpeng Li, Hongbo Fu, and Lin Gao. 
Deepfaceediting: deep face generation and editing with disentangled geometry and appearance control. ACM Transactions on Graphics, 40(4), July 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.333, + 0.921, + 0.366 + ], + "angle": 0, + "content": "[26] Xian Wu, Chen Wang, Hongbo Fu, Ariel Shamir, Song-Hai Zhang, and Shi-Min Hu. Deepportraitdrawing: Generating human body images from freehand sketches, 2022. ArXiv:2205.02070." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.367, + 0.921, + 0.412 + ], + "angle": 0, + "content": "[27] Arnab Ghosh, Richard Zhang, Puneet K Dokania, Oliver Wang, Alexei A Efros, Philip HS Torr, and Eli Shechtman. Interactive sketch & fill: Multiclass sketch-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1171-1180, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.412, + 0.921, + 0.457 + ], + "angle": 0, + "content": "[28] Wengling Chen and James Hays. Sketchygan: Towards diverse and realistic sketch to image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 9416-9425, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.458, + 0.921, + 0.491 + ], + "angle": 0, + "content": "[29] Zeyu Li, Cheng Deng, Erkun Yang, and Dacheng Tao. Staged sketch-to-image synthesis via semi-supervised generative adversarial networks. IEEE Transactions on Multimedia, 23:2694-2705, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.492, + 0.921, + 0.525 + ], + "angle": 0, + "content": "[30] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE Signal Processing Magazine, 35(1):53-65, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.526, + 0.921, + 0.547 + ], + "angle": 0, + "content": "[31] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. 2014. ArXiv:1411.1784." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.548, + 0.921, + 0.593 + ], + "angle": 0, + "content": "[32] Yifang Men, Yiming Mao, Yuning Jiang, Wei-Ying Ma, and Zhouhui Lian. Controllable person image synthesis with attribute-decomposed gan. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 5084-5093, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.594, + 0.921, + 0.627 + ], + "angle": 0, + "content": "[33] Yifan Liu, Zengchang Qin, Zhenbo Luo, and Hua Wang. Auto-painter: Cartoon image generation from sketch by using conditional generative adversarial networks. 2017. ArXiv:1705.01908." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.628, + 0.921, + 0.672 + ], + "angle": 0, + "content": "[34] Yuanzheng Ci, Xinzhu Ma, Zhihui Wang, Haojie Li, and Zhongxuan Luo. User-guided deep anime line art colorization with conditional adversarial networks. In Proceedings of the 26th ACM International Conference on Multimedia, page 1536-1544, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.673, + 0.921, + 0.706 + ], + "angle": 0, + "content": "[35] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.707, + 0.921, + 0.75 + ], + "angle": 0, + "content": "[36] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. In Proceedings of the Conference on Neural Information Processing Systems, page 405-415, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.751, + 0.921, + 0.797 + ], + "angle": 0, + "content": "[37] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3408-3416, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.921, + 0.831 + ], + "angle": 0, + "content": "[38] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proceedings of the Conference on Neural Information Processing Systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.832, + 0.921, + 0.864 + ], + "angle": 0, + "content": "[39] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. 2020. ArXiv:2011.13456." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.865, + 0.921, + 0.898 + ], + "angle": 0, + "content": "[40] Junyao Gao, Yanan Sun, Fei Shen, Xin Jiang, Zhening Xing, Kai Chen, and Cairong Zhao. Faceshot: Bring any character into life. 2025. ArXiv:2503.00740." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.921, + 0.945 + ], + "angle": 0, + "content": "[41] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. Proceedings of the Conference on Neural Information Processing Systems, 36:30146-30166, 2023." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.418, + 0.042 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[42] Fei Shen, Xin Jiang, Xin He, Hu Ye, Cong Wang, Xiaoyu Du, Zechao Li, and Jinhui Tang. Imagdressing-v1: Customizable virtual dressing. 2024. 
ArXiv:2407.12705." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.107, + 0.492, + 0.14 + ], + "angle": 0, + "content": "[43] Ente Lin, Xujie Zhang, Fuwei Zhao, Yuxuan Luo, Xin Dong, Long Zeng, and Xiaodan Liang. Dreamfit: Garment-centric human generation via a lightweight anything-dressing encoder. 2024. ArXiv:2412.17644." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.141, + 0.492, + 0.174 + ], + "angle": 0, + "content": "[44] Weifeng Chen, Tao Gu, Yuhao Xu, and Arlene Chen. Magic clothing: Controllable garment-driven image synthesis. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6939-6948, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.174, + 0.492, + 0.208 + ], + "angle": 0, + "content": "[45] Yuhao Xu, Tao Gu, Weifeng Chen, and Chengcai Chen. Ootdiffusion: Outfitting fusion based latent diffusion for controllable virtual try-on. 2024. ArXiv:2403.01779." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.208, + 0.492, + 0.275 + ], + "angle": 0, + "content": "[46] Xujie Zhang, Binbin Yang, Michael C Kampffmeyer, Wenqing Zhang, Shiyue Zhang, Guansong Lu, Liang Lin, Hang Xu, and Xiaodan Liang. Diffcloth: Diffusion based garment synthesis and manipulation via structural cross-modal semantic alignment. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23154-23163, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.276, + 0.492, + 0.298 + ], + "angle": 0, + "content": "[47] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. 2022. ArXiv:2207.12598." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.299, + 0.492, + 0.322 + ], + "angle": 0, + "content": "[48] Muhammad Hussain. Yolov5, yolov8 and yolov10: The go-to detectors for real-time vision, 2024. ArXiv:2407.02988." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.322, + 0.492, + 0.367 + ], + "angle": 0, + "content": "[49] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond, 2023. ArXiv:2308.12966." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.367, + 0.492, + 0.412 + ], + "angle": 0, + "content": "[50] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7915-7925, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.412, + 0.492, + 0.435 + ], + "angle": 0, + "content": "[51] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. ArXiv:1512.03385." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.435, + 0.492, + 0.469 + ], + "angle": 0, + "content": "[52] J. MacQueen. Some methods for classification and analysis of multivariate observations. In Proceedings of the 5th Berkeley Symposium on Mathematical Statistics and Probability, pages 281-297, 1967." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.469, + 0.492, + 0.514 + ], + "angle": 0, + "content": "[53] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Proceedings of the Conference on Neural Information Processing Systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.514, + 0.492, + 0.559 + ], + "angle": 0, + "content": "[54] Kai Zeng, Zhou Wang, Anmin Zhang, Zhaohui Wang, and Wenjun Zhang. A color structural similarity index for image quality assessment. In Proceedings of the IEEE International Conference on Image Processing (ICIP), pages 660-664, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.559, + 0.492, + 0.604 + ], + "angle": 0, + "content": "[55] Masato Fujitake. Rl-logo: Deep reinforcement learning localization for logo recognition. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2830-2834. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.605, + 0.492, + 0.65 + ], + "angle": 0, + "content": "[56] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.65, + 0.492, + 0.672 + ], + "angle": 0, + "content": "[57] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. ArXiv:1711.05101." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.672, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[58] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. ArXiv:2010.02502." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.695, + 0.492, + 0.741 + ], + "angle": 0, + "content": "[59] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.741 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_origin.pdf b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..557851b742005140a4c95318204b6c994507063e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/dcba8e1a-7c3a-45c9-a024-1335df59d42b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0ae508bf2c02c7a99b3cc715071b0fe964eda6f1780018ac7ff059a15485d35 +size 2652379 diff --git a/data/2025/2504_13xxx/2504.13176/full.md b/data/2025/2504_13xxx/2504.13176/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0111815db5f895293d73d1be3ed3151534f18b86 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/full.md @@ -0,0 +1,368 @@ +# IMAGGarment: Fine-Grained Garment Generation for Controllable Fashion Design + +Fei Shen, Jian Yu, Cong Wang, Xin Jiang, Xiaoyu Du, and Jinhui Tang, Senior Member, IEEE + +Abstract—This paper presents IMAGGarment, a fine-grained garment generation (FGG) framework that enables high-fidelity garment synthesis with precise control over silhouette, color, and logo placement. Unlike existing methods that are limited to single-condition inputs, IMAGGarment addresses the challenges of multi-conditional controllability in personalized fashion design and digital apparel applications. Specifically, IMAGGarment employs a two-stage training strategy to separately model global appearance and local details, while enabling unified and controllable generation through end-to-end inference. In the first stage, we propose a global appearance model that jointly encodes silhouette and color using a mixed attention module and a color adapter. 
In the second stage, we present a local enhancement model with an adaptive appearance-aware module to inject user-defined logos and spatial constraints, enabling accurate placement and visual consistency. To support this task, we release GarmentBench, a large-scale dataset comprising over 180K garment samples paired with multi-level design conditions, including sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that our method outperforms existing baselines, achieving superior structural stability, color fidelity, and local controllability performance. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment. + +Index Terms—Fine-Grained Garment Generation, Multi-Conditional Generation, Fashion Design Applications, Garment-Bench Dataset. + +# I. INTRODUCTION + +Fine-Grained garment generation (FGG) aims to synthesize high-quality garments with precise control over garment silhouette, color scheme, logo content, and spatial placement. As personalized fashion and the digital apparel market grow rapidly, fine-grained controllability [1]–[4] is increasingly crucial for applications in fashion design and e-commerce. + +In traditional garment ideation [5], [6] and visualization [7], [8], designers analyze line drawings to establish silhouette and construction, then select color palettes and materials, and + +Fei Shen is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the NExT++ Research Centre, National University of Singapore, Singapore, e-mail: shenfei29@nus.edu.sg + +Jian Yu, Xin Jiang, and Xiaoyu Du are with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China. e-mail: jianyu@njust.edu.cn; xinjiang@njust.edu.cn; duxy@njust.edu.cn. 
+ +Cong Wang is with the State Key Laboratory for Novel Software Technology and the School of Computer Science, Nanjing University, Nanjing, 210023, China. e-mail: cw@smail.nju.edu.cn + +Jinhui Tang is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the College of Information Science and Technology and Artificial Intelligence, Nanjing Forestry University, Nanjing 210037, China, e-mail: jinhuitang@njust.edu.cn. (Corresponding author: Jinhui Tang.) + +finally arrange brand elements such as logos and trims. This manual workflow has two persistent drawbacks. First, it is time consuming: to match the specification, edits must be applied object by object and view by view; in a seasonal collection, even identical panels within the same board are recolored or relabeled one at a time, which does not scale. Second, it is error prone and inconsistent: small deviations in hue, shading, or logo placement arise across artists and rounds of revision, yielding mismatches across styles, sizes, and camera viewpoints. As project scope grows, these issues inflate turnaround time and complicate quality control and version management. + +Recently, image synthesis [9]–[12] has made notable progress in tasks such as sketch-to-image generation [13]–[16] and logo insertion [17]–[19] (as illustrated in Fig. 1 (a)), demonstrating basic capabilities in structural and content-level control. However, these tasks [13], [17], [20] provide only coarse guidance and rely on single-condition inputs (e.g., sketch or color), lacking the fine-grained controllability needed to model the nuanced interactions between global structure and local details in garment design. Although sequential or modular combinations may offer partial solutions, they [21]–[23] fail to explicitly disentangle and jointly model global attributes (e.g., silhouette, color) and local appearance details (e.g., logo content and spatial placement). 
Without unified control mechanisms, these approaches [21]–[23] often suffer from condition entanglement, conflicting objectives, and visual inconsistencies, ultimately falling short of the high standards required in real-world fashion design. In contrast, practical fashion design [5], [6] requires joint control over multiple interdependent factors: designers determine global attributes such as silhouette and color, followed by fine-tuning of local elements like logos and their placement. To support this process, a unified generation task that clearly separates and coordinates global and local attributes is essential for controllable and high-fidelity synthesis. + +To address these limitations, we propose a new task: fine-grained garment generation (FGG), as illustrated in Fig. 1 (b). FGG is formulated as a unified multi-conditional garment synthesis task, taking a textual prompt, garment silhouette, color palette, and spatially constrained logos as joint inputs. It aims to generate garments that faithfully reflect high-level structural intent and fine-grained local styling cues. FGG is specifically designed to mirror real-world fashion workflows, where designers must coordinate diverse input modalities to express creative intent. Unlike conventional approaches that process each condition independently or sequentially, FGG emphasizes joint modeling and hierarchical reasoning across + +![](images/0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg) + +![](images/834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg) + +![](images/ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg) +(a) Sketch-to-image and logo insertion task +(c) Generalization capability in real-world applications + +![](images/6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg) +(b) Fine-grained garment generation task + +![](images/58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg) +Fig. 1. 
Comparison of (a) existing sketch-to-image and logo insertion tasks with (b) our proposed fine-grained garment generation (FGG) task, which enables precise and controllable synthesis of garment structure, color, logo, and spatial placement. Unlike previous tasks that rely on a single input condition, FGG is tailored for real-world fashion design workflows by integrating multiple conditional controls. + +input types. It goes beyond simple task combinations by enforcing consistent integration of global and local attributes within a unified generation framework, enabling nuanced control over the overall structure and detailed appearance. Specifically, FGG task introduces three key challenges: (1) maintaining visual and semantic consistency across heterogeneous input conditions, (2) resolving conflicts between global structures and localized visual elements, and (3) generalizing to unseen condition combinations without retraining (see Fig. 1(c)). FGG thus marks a fundamental shift from single-condition or loosely coupled pipelines toward a unified, design-intent-driven generation paradigm that better reflects the complexity of real-world garment design. + +To this end, we propose IMAGGarment, a two-stage training and end-to-end inference framework tailored for fine-grained garment generation. Unlike prior methods that rely on single-condition inputs or simple feature fusion, our framework is explicitly designed to achieve fine-grained controllability under multiple, interdependent constraints. In the first stage, we propose a global appearance model with a mixed attention module and a color adapter to jointly encode garment silhouette and color palette, improving overall appearance fidelity and mitigating condition entanglement. In the second stage, we present a local enhancement model equipped with an adaptive appearance-aware module to inject user-defined logos and their spatial constraints, enabling precise logo placement while preserving global consistency. 
To further promote research in + +this direction, we release GarmentBench, a large-scale dataset comprising over 180k garment samples annotated with rich multi-level design conditions, including silhouette sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that IMAGGarment significantly outperforms existing baselines in terms of structural stability and local controllability. To summarize, the main contributions are listed as follows: + +- We propose IMAGGarment, a controllable garment generation framework that enables precise control over garment structure, color, and logo placement, addressing the challenges of FGG. +- We design a mixed attention module, color adapter, and adaptive appearance-aware module to disentangle global structure from local attributes, achieving fine-grained visual control and accurate spatial control. +- We release GarmentBench, a large-scale dataset with diverse garments and rich multi-conditional annotations, serving as a valuable benchmark for controllable garment generation research. + +The remainder of this paper is organized as follows. Section II surveys prior work on garment generation, encompassing GAN-based techniques and diffusion-based controllable generation. Section III describes the proposed IMAGGarment methodology, comprising a global appearance model with mixed attention and a color adapter, a local enhancement model with the A3 module, and the associated training and + +inference strategies. Section IV presents the experimental protocol and results, including the GarmentBench dataset and evaluation metrics, implementation details, and results and analysis. Section V concludes the paper. + +# II. RELATED WORK + +# A. GAN-Based Methods + +Early approaches [24]–[29] to garment generation predominantly build on generative adversarial networks (GANs) [30]–[32], with a major line devoted to sketch-to-image translation [33] that learns spatial mappings from structural cues. 
Representative systems such as DeepFaceDrawing [24] and DeepFaceEditing [25] decompose sketches into semantic components and progressively assemble photorealistic results, while DeepPortraitDrawing [26] extends this paradigm to full-body synthesis via local-to-global pipelines. Interactive frameworks [27] further introduce gating mechanisms for user-guided editing, and DALColor [34] combines WGAN-GP [35] with line-art colorization for refined appearance control. Beyond sketches, related GAN-based efforts explore pose- or part-guided generation [36], [37], leveraging learned warping or deformable alignment to better propagate structural constraints from sources to targets. + +However, these methods [24]–[27] are largely restricted to single-condition settings (e.g., sketches or poses alone), making it difficult to support real-world fashion scenarios that require joint control over multiple factors such as silhouette, garment layers, color/pattern, and local embellishments. Moreover, adversarial training is prone to instability and visual artifacts [32], [36], [37], and the reliance on paired or carefully aligned supervision limits robustness to occlusion, diverse body shapes, and open-world catalogs. As a result, while GAN-based pipelines can produce plausible textures under constrained conditions, they struggle to achieve reliable, fine-grained, and multi-conditional controllability at scale. + +# B. Diffusion-Based Methods + +Diffusion models [38]–[40] have achieved strong progress in conditional image generation owing to their iterative denoising process and flexible conditioning interfaces. To improve controllability with minimal modification to large backbones, plugin-based approaches such as IP-Adapter [21], ControlNet [22], and BLIP-Diffusion [41] inject external conditions (e.g., reference images, structural maps, or language cues) through lightweight adapters. 
In parallel, reference-guided or dual-stream designs [42]–[45] propagate features from exemplars alongside text/image prompts, thereby strengthening identity preservation and fine control during sampling. + +In fashion-related applications, DiffCloth [46] supports localized garment edits via part-specific textual prompts, enabling independent control over regions such as sleeves and collars. For logo-centric generation, AnyLogo [18] adopts a dual-state denoising strategy to retain subtle logo details; LogoSticker [19] performs token-based injection to flexibly place logo elements; and RefDiffuser [17] leverages expert-driven plugins to enhance texture fidelity and spatial alignment. Despite these advances, most methods emphasize either global + +TABLEI DEFINITIONS OF MAIN SYMBOLS USED IN THIS PAPER. + +
NotationDefinition
tTimestep
ZtLatent feature at t step
ZmOutput of mixed attention
x0Real image
xtNoisy data at t step
GGarment image
LLogo image
MMask image
CgFeature of garment image
ClFeature of logo image
CmFeature of mask image
CsFeature of silhouette image
CcFeature of color image
CtFeature of text prompt
θgGlobal appearance model
θlLocal enhancement model
εGaussian noise
αtCumulative product of noise weights
wGuidance scale
αSilhouette scale
βColor scale
+ +appearance control or localized editing in isolation. A unified framework that jointly models multiple design conditions, e.g., silhouette and layer topology together with color/pattern and local embellishments, while maintaining structural coherence across the denoising trajectory remains underexplored. + +# III. METHODOLOGY + +Symbol Definition. To introduce our IMAGGarment method more clearly, we define the main symbols used throughout the paper in TABLE I. + +Task Definition. Given a garment silhouette, color palette, user-defined logo, location and an optional text description, fine-grained garment generation (FGG) aims to synthesize high-fidelity garment images with precise control over both global structure and local visual attributes. The key challenges lie in jointly modeling multi-conditional inputs, maintaining semantic and visual consistency across different design factors, and supporting controllable placement of fine-grained elements such as logos and color regions. + +# A. Overall Framework + +To address the above challenges, we propose IMAGGarment, a conditional diffusion framework tailored for fine-grained garment generation. Our framework comprises two components: a global appearance model (stage I) and a local enhancement model (stage II), which explicitly disentangle and jointly control the global appearance and local details under multi-conditional guidance, enabling accurate synthesis of garment silhouette, color, and logo placement. As illustrated in Fig. 2, the global appearance model first generates a latent of coarse garment image conditioned on the textual prompt, garment silhouette, and color palette. Subsequently, the local enhancement model refines this latent representation by integrating user-defined logo and spatial constraint, producing the final high-fidelity garment image with fine-grained controllability. 
Specifically, the global appearance model (Section III-B) + +![](images/cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg) +Fig. 2. Visualization of the IMAGGarment inference pipeline. The global appearance model generates coarse latent from textual prompts, silhouettes, and colors. The local enhancement model then injects user-defined logos and spatial location constraints to produce the fine-grained garment. + +leverages our proposed mixed attention module and color adapter to effectively capture global appearance features from textual descriptions, silhouettes, and colors, while mitigating entanglement among these conditions. The local enhancement model (Section III-C) introduces an adaptive appearance-aware module ( $A^3$ Module) that injects logo content and spatial location constraint into the latent space, achieving precise logo placement. Finally, the training and inference strategies used in IMAGGarment are summarized in Section III-D. + +# B. Stage I: Global Appearance Model + +Motivation. Existing garment generation methods [21]–[23] typically rely on single-condition inputs (e.g., sketch or text), causing entangled features and limited controllability. To resolve this, we propose a global appearance model that explicitly disentangles silhouette, color, and text, enabling precise multi-conditional control. + +Architecture. As shown in the left of the Fig. 3, our global appearance model comprises two shared frozen VAE encoders, one frozen VAE decoder, a trainable silhouette UNet, a frozen text encoder, a trainable color adapter, and a denoising UNet with the proposed mixed attention. Specifically, we first utilize the frozen VAE encoder to project the input reference silhouette into the latent space. 
Subsequently, we employ a trainable silhouette UNet (structurally identical to the denoising UNet but without cross attention) to extract fine-grained silhouette features, which are then integrated into the frozen denoising UNet via our proposed mixed attention module. Meanwhile, textual features obtained from the frozen CLIP text encoder and color features extracted by the proposed color adapter are further fused into the denoising UNet through cross attention. After multiple denoising iterations, the model generates coarse garment images that precisely align with the reference silhouette and faithfully reflect user-specified color. + +Mixed Attention. To effectively incorporate reference silhouette features into the denoising UNet without compromising the generative capability of the original UNet, we propose a mixed attention module. As shown in Fig. 3, we extend all self attention layers in the denoising UNet to the proposed mixed attention, which introduces two additional learnable projection layers to align the silhouette features $C_s$ with the latent features $Z_t$ . Formally, the mixed attention is defined as: + +$$ +Z _ {m} = \operatorname {S o f t m a x} \left(\frac {Q K ^ {T}}{\sqrt {d}}\right) V + \alpha \cdot \operatorname {S o f t m a x} \left(\frac {Q \left(K ^ {\prime}\right) ^ {T}}{\sqrt {d}}\right) V ^ {\prime}, \tag {1} +$$ + +where $\alpha$ is a hyperparameter controlling the strength of silhouette conditioning. The projections are computed as follows: + +$$ +Q = Z _ {t} W _ {q}, K = Z _ {t} W _ {k}, V = Z _ {t} W _ {v}, K ^ {\prime} = C _ {s} W _ {k} ^ {\prime}, V ^ {\prime} = C _ {s} W _ {v} ^ {\prime} \tag {2} +$$ + +where $W_{q}, W_{k}, W_{v}$ are frozen parameters of linear projection layers, whereas $W_{k}^{\prime}, W_{v}^{\prime}$ are newly introduced learnable parameters of projection layers initialized from $W_{k}$ and $W_{v}$ , respectively. 
Our mixed attention facilitates the seamless integration of silhouette features into the denoising UNet, thus ensuring that generated garments maintain precise spatial alignment with the reference silhouette. + +Color Adapter. Accurate color manipulation is essential for generating garments with fine-grained visual details, significantly enhancing visual quality and realism. However, as the base model's textual prompts cannot reliably produce the intended colors, discrepancies often arise between the generated and expected colors. To address this issue, we propose a dedicated color adapter that explicitly treats color as an independent controllable factor. Specifically, given a reference color image, we extract color features $C_c$ using a frozen CLIP image encoder combined with a trainable linear layer. Subsequently, these color features are integrated into the denoising UNet via a cross attention mechanism, jointly with textual features $C_t$ obtained from the frozen CLIP text encoder: + +$$ +Z _ {n e w} = \operatorname {S o f t m a x} \left(\frac {Q K _ {t} ^ {T}}{\sqrt {d}}\right) V _ {t} + \beta \cdot \operatorname {S o f t m a x} \left(\frac {Q K _ {c} ^ {T}}{\sqrt {d}}\right) V _ {c}, \tag {3} +$$ + +where $Q = Z_{t}W_{q}$ , $K_{t} = C_{t}W_{k}^{t}$ , $V_{t} = C_{t}W_{v}^{t}$ , and $K_{c} = C_{c}W_{k}^{c}$ , $V_{c} = C_{c}W_{v}^{c}$ . Here, $W_{k}^{t}, W_{v}^{t}$ denote frozen parameters of the original cross attention layers in the denoising UNet, while $W_{k}^{c}, W_{v}^{c}$ are newly introduced trainable projection layers. The hyperparameter $\beta$ modulates the adapter's influence, ensuring precise alignment between generated colors and user specifications. + +# C. Stage II: Local Enhancement Model + +Motivation. Existing methods [18], [19] typically neglect detailed logo integration or treat it as a separate task, causing poor spatial alignment and visual inconsistency. 
To address this limitation, we propose a local enhancement model equipped with an adaptive appearance-aware $(A^3)$ module, explicitly injecting user-defined logos and spatial constraints into the latent space. This design enables precise, consistent control over localized garment details, significantly enhancing visual fidelity. + +Architecture. As illustrated on the right of Fig. 3, the local enhancement model comprises a frozen VAE encoder and decoder, a denoising UNet, and an adaptive appearance-aware module ( $A^3$ module). The $A^3$ module fuses local conditions, such as logos and spatial constraints, by concatenating them along spatial or channel dimensions, enabling precise control over fine-grained visual elements. Given a garment, logo, and placement mask, the model adaptively adjusts the logo's size and position while preserving its visual fidelity. To reduce redundancy and focus on local detail refinement, we optimize only the self attention layers of the denoising UNet and discard + +![](images/c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg) +Fig. 3. Overview of our IMAGGarment framework. IMAGGarment is a two-stage conditional diffusion framework for fine-grained garment generation. The global appearance model first synthesizes a coarse latent representation from the input text prompt, silhouette, and color palette using a parallel UNet with mixed attention and a color adapter. The local enhancement model then refines this latent by injecting user-defined logos and location constraints through the proposed $A^3$ module, enabling precise logo placement and high-fidelity garment generation. + +![](images/00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg) + +all cross attention layers, as the global appearance model has already encoded the textual information. + +$A^3$ Module. To precisely integrate fine-grained logo details into designated garment regions, we introduce the adaptive appearance-aware $(A^3)$ module. 
By fusing image-based conditions across specific dimensions, our $A^3$ module enables precise and consistent logo integration. Specifically, given a coarse garment image $G$ , a logo image $L$ , and a binary placement mask $M$ , we first encode them using a frozen VAE encoder to obtain their corresponding latent features: $C_g \in \mathbb{R}^{4 \times \frac{H}{8} \times \frac{W}{8}}$ and $C_l \in \mathbb{R}^{4 \times \frac{H}{8} \times \frac{W}{8}}$ . The mask $M$ is resized via nearest-neighbor interpolation to match the latent resolution, resulting in $C_m \in \mathbb{R}^{1 \times \frac{H}{8} \times \frac{W}{8}}$ . We then construct the spatially aligned conditional input as: + +$$ +X = \operatorname {C o n c a t} \left(C _ {g} \otimes C _ {m}, C _ {l}\right), \quad X \in \mathbb {R} ^ {4 \times \frac {H}{8} \times \frac {W}{4}}, \tag {4} +$$ + +where $\otimes$ denotes element-wise multiplication and Concat indicates spatial concatenation along the width dimension. To align with $X$ , the resized mask $C_m$ is zero-padded to obtain $C_M \in \mathbb{R}^{1 \times \frac{H}{8} \times \frac{W}{4}}$ . Next, we concatenate the garment and logo features to form a clean latent representation: + +$$ +x _ {0} = \operatorname {C o n c a t} \left(C _ {g}, C _ {l}\right), \tag {5} +$$ + +and inject noise consistent with the diffusion process: + +$$ +x _ {t} = \sqrt {\bar {\alpha} _ {t}} \cdot x _ {0} + \sqrt {1 - \bar {\alpha} _ {t}} \cdot \epsilon , \quad \epsilon \sim \mathcal {N} (0, \mathbf {I}), \tag {6} +$$ + +where $x_0$ denotes the clean latent feature obtained by concatenating garment and logo features, and $x_{t} \in \mathbb{R}^{4 \times \frac{H}{8} \times \frac{W}{4}}$ is the corresponding noisy latent at diffusion timestep $t$ . $\bar{\alpha}_{t}$ is the cumulative product of the noise schedule coefficients, and $\epsilon$ is the Gaussian noise sampled from $\mathcal{N}(0,\mathbf{I})$ . 
Finally, the full model input is obtained by concatenating the noisy latent $x_{t}$ , the padded mask $C_M$ , and the aligned conditional input $X$ along the channel dimension: + +$$ +Z = \operatorname {C o n c a t} \left(x _ {t}, C _ {M}, X\right), \quad Z \in \mathbb {R} ^ {9 \times \frac {H}{8} \times \frac {W}{4}}. \tag {7} +$$ + +This channel-wise concatenation allows the model to jointly reason over appearance, spatial constraints, and guidance + +signals, while maintaining compatibility with the UNet architecture for spatially aware logo synthesis. + +# D. Training and Inference + +Training. The training process is divided into two stages, each targeting a specific set of objectives with separate optimization strategies. We first train the global appearance model independently to generate a semantically coherent garment representation conditioned on silhouette and color. After verifying its performance, we freeze it and train the local enhancement model to inject fine-grained logos guided by spatial masks. This sequential training avoids gradient interference between heterogeneous objectives and ensures each module converges toward its task-specific goal. Both stages adopt mean squared error (MSE) loss to supervise the denoising process. + +Stage I. The global appearance model $\theta_{g}$ is trained to synthesize garments that align with the target silhouette and color under textual guidance. To preserve the generative capacity of the pretrained denoising UNet, we freeze all parameters except those of the silhouette UNet and the cross-attention projections in the mixed attention module. 
Given silhouette features $C_s$ , text embeddings $C_t$ , and color features $C_c$ , we adopt a decoupled training strategy with $L_{\mathrm{silhouette}}$ and $L_{\mathrm{color}}$ losses: + +$$ +L _ {\text {s i l h o u e t t e}} = \mathbb {E} _ {x _ {0}, \epsilon , C _ {t}, C _ {s}, t} \| \epsilon - \epsilon_ {\theta_ {g}} (x _ {t}, C _ {t}, C _ {s}, t) \| ^ {2}, \tag {8} +$$ + +$$ +L _ {\mathrm {c o l o r}} = \mathbb {E} _ {x _ {0}, \epsilon , C _ {t}, C _ {c}, t} \left\| \epsilon - \epsilon_ {\theta_ {g}} (x _ {t}, C _ {t}, C _ {c}, t) \right\| ^ {2}, +$$ + +where $\epsilon$ is the added noise and $\epsilon_{\theta_g}$ is the prediction from the global appearance model at timestep $t$ . + +Stage II. The local enhancement model $\theta_{l}$ refines the coarse latent by injecting logos at user-defined locations. To reduce overhead, we fine-tune only the self-attention layers of the logo UNet. Given logo feature $C_l$ , spatial mask $C_m$ , and garment latent $C_g$ , the training objective $L_{\mathrm{logo}}$ is: + +$$ +L _ {\log o} = \mathbb {E} _ {x _ {0}, \epsilon , C _ {l}, C _ {m}, C _ {g}, t} \| \epsilon - \epsilon_ {\theta_ {l}} \left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\right) \| ^ {2}, \tag {9} +$$ + +![](images/64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg) + +![](images/a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg) +(a) Dataset Construction Pipeline + +![](images/6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg) + +![](images/66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg) + +![](images/326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg) + +![](images/3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg) + +![](images/87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg) +(b) Samples from the GarmentBench Dataset + +![](images/95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg) +Fig. 4. 
Overview of GarmentBench dataset construction pipeline and samples. (a) Data construction pipeline for GarmentBench. (b) Example samples with multimodal annotations: silhouette, logo, text, logo location, and color. + +![](images/a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg) + +where $\epsilon_{\theta_l}$ denotes the prediction from the local enhancement model. + +Inference. IMAGGarment supports end-to-end inference through a two-stage pipeline operating in a shared latent space. The global appearance model first generates a latent of coarse garment image conditioned on the input text prompt, silhouette, color, and mask. This process is guided by classifier-free guidance (CFG) [47]: + +$$ +\begin{array}{l} \check {\epsilon} _ {\theta_ {g}} \left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\right) = w \cdot \epsilon_ {\theta_ {g}} \left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\right) \tag {10} \\ + (1 - w) \cdot \epsilon_ {\theta_ {g}} \left(x _ {t}, t\right) \\ \end{array} +$$ + +here, $w$ is the CFG scale and $x_{t}$ denotes the noisy latent at timestep $t$ . The coarse latent is then refined by the local enhancement model, which incorporates user-defined logos and spatial constraints through the $A^3$ module. We apply conditional CFG: + +$$ +\begin{array}{l} \check {\epsilon} _ {\theta_ {l}} \left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\right) = w \cdot \epsilon_ {\theta_ {l}} \left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\right) \tag {11} \\ + (1 - w) \cdot \epsilon_ {\theta_ {l}} \left(x _ {t}, C _ {m}, C _ {g}, t\right) \\ \end{array} +$$ + +# IV. EXPERIMENTS + +# A. Dataset and Metrics + +Dataset Construction. As shown in Fig. 4 (a), we construct and release GarmentBench, a large-scale dataset for fine-grained garment generation, containing multi-modal design conditions such as text, sketches, colors, logos, and location masks. It serves as a controllable and extensible benchmark for advancing personalized fashion generation. 
The construction process is as follows: + +(1) Image Collection and Preprocessing. We collect over 189K high-quality garment images from the internet, covering a wide range of categories such as tops, bottoms, and dresses. To eliminate background distractions and focus on the garment region, we apply YOLOv8 [48] for clothing detection and perform tight cropping to obtain clean garment-centric images for further processing. +(2) Text, Sketch, and Color Extraction. For each image, we automatically generate three auxiliary conditions to simulate real-world design guidance: textual descriptions generated by the multi-modal LLM Qwen-VL-Chat [49], covering key attributes such as color, silhouette, and style; structural sketches + +obtained using Informative-Drawings [50], providing shape and layout priors; and color palettes extracted from single-color garments identified via ResNet50 [51] and clustered using K-means [52]. + +(3) Logo Extraction and Location Annotation. To support logo insertion and spatial control, we further extract local design elements such as logos and prints. We use YOLOv8 to detect visually distinct regions (e.g., anime characters, animal patterns), followed by manual verification to ensure label quality. We also annotate spatial locations and generate binary masks to serve as precise spatial constraints. In total, GarmentBench contains 189,966 garment-condition pairs with rich fine-grained annotations. + +Dataset Description. As shown in Fig. 4 (b), we present representative samples from the GarmentBench dataset, which include fine-grained garment images paired with multi-modal conditions such as textual descriptions, structural silhouettes, color references, logos, and spatial location masks. Additionally, we randomly sample images from the Fashion-ControlNet-Dataset-V31 and apply the same preprocessing pipeline as GarmentBench to construct a test set with 1,267 image-condition pairs for evaluation and comparative analysis. + +Dataset Statement. 
GarmentBench is curated from publicly available fashion imagery under a non-commercial research intent. All personal identifiers were removed; third-party logos and brand marks are included solely to evaluate controllability and remain the property of their respective owners. We release only derived annotations and source URLs (not raw images), together with license notices and a takedown procedure; exact split indices and random seeds are provided for reproducibility.

Evaluation Metrics. We adopt four metrics to comprehensively evaluate visual quality, conditional consistency, and fine-grained controllability. Fréchet inception distance (FID) [53] measures the distribution similarity between generated and real images, reflecting overall realism. Color structure similarity (CSS) [54] assesses the consistency of color distribution, measuring color controllability. Logo location accuracy (LLA) [55] quantifies the spatial deviation between generated and target logo positions, reflecting spatial precision. Lastly, learned perceptual image patch similarity (LPIPS) [56] reflects human-perceived visual similarity, effectively capturing structural and textural consistency. These metrics comprehensively assess quality and controllability in fine-grained garment generation.

# B. Implementation Details

In our experiments, both the silhouette UNet and the denoising UNet are initialized with the pretrained Stable Diffusion v1.5 model2. The local enhancement model is based on the inpainting variant of Stable Diffusion v1.53, with only the self-attention layers being fine-tuned to reduce computational cost. We adopt OpenCLIP ViT-H/144 as the CLIP image encoder. All input images are resized to $512 \times 640$ resolution.
We

1https://huggingface.co/datasets/Abrumu/Fashion_controlnet_dataset_V3
2https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5
3https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting
4https://github.com/mlfoundations/open_clip

TABLE II QUANTITATIVE COMPARISONS ON GARMENTBENCH. OURS ACHIEVES THE TOP RESULTS ACROSS ALL METRICS, WITH BEST IN BOLD.

MethodFID ↓CSS ↓LLA ↑LPIPS ↓
BLIP-Diffusion* [41]101.99104.440.130.68
ControlNet-Garment* [22]41.2283.300.360.41
AnyDoor* [59]38.0868.240.650.17
IP-Adapter-Garment* [21]37.9592.950.360.43
IMAGGarment (Ours)17.6336.160.720.10
+ +* denotes re-implemented by us for a fair comparison. + +use the AdamW optimizer [57] with a constant learning rate of $1 \times 10^{-5}$ . The global appearance model and the local enhancement model are trained for 150K and 50K steps, respectively, using a batch size of 20. During inference, we adopt the DDIM sampler [58] with 50 sampling steps. Unless otherwise specified, the silhouette weight $\alpha$ and color weight $\beta$ in Eq.1 and Eq.3 are set to 0.6 and 1.0. The classifier-free guidance (CFG) scale $w$ in Eq.10 and Eq.11 is set to a default value of 7.0. + +# C. Baseline Comparisons + +Due to the absence of prior work tailored to fine-grained garment generation with multi-condition control, we compare our method against four representative baselines: BLIP-Diffusion [41], AnyDoor [59], ControlNet [22], and IP-Adapter [21]. For subject-driven generation methods, BLIP-Diffusion [41] leverages a learnable Q-Former to align textual and visual embeddings in the latent space, initially designed for subject-preserving generation from text-image pairs. AnyDoor [59] combines identity and detail encoders to reconstruct personalized content, which we adapt to conditions of garment appearance and logo inputs. For plugin-based baselines, we extend ControlNet [22] and IP-Adapter [21] by duplicating and modifying their conditional branches to support multi-conditional inputs, such as silhouette, color, and logo. The adapted versions are referred to as ControlNet-Garment and IP-Adapter-Garment. Specifically, for ControlNet-Garment, we input silhouette, color, logo and mask maps into the ControlNet branch and inject them at each downsampling block, following standard practice. For IP-Adapter-Garment, we extend the official implementation to accept silhouette, color, logo and mask embeddings, which are concatenated and injected via cross-attention. 
To ensure task relevance, all methods are fine-tuned on our GarmentBench dataset with support for logo-specific conditioning. All methods are trained and evaluated under identical training protocols, input resolutions, and hardware setups. The corresponding quantitative and qualitative results are presented in Table II and Fig. 5, respectively, with detailed analysis provided below. + +Quantitative Results. As shown in Table II, IMAGGarment achieves the best performance across all four metrics on the GarmentBench dataset, demonstrating its superiority in controllable fine-grained garment generation. Compared to subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]), which rely on global features for personalized reconstruction, IMAGGarment shows substantial improvements in FID, + +![](images/d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg) +Fig. 5. Qualitative results on seen and unseen GarmentBench samples. The seen set uses original test pairs, while the unseen set involves randomly mixed conditions. IMAGGarment delivers the most consistent outputs, achieving accurate silhouette, color, and logo control across both settings. + +![](images/88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg) + +TABLE III QUANTITATIVE ABLATION RESULTS ON GARMENTBENCH. + +
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
B0139.33104.540.150.64
B147.4236.650.300.15
B230.1997.050.560.33
B321.2043.000.650.11
B446.16108.250.520.38
Full17.6336.160.720.10
+ +LPIPS, and CSS. These gains highlight the effectiveness of our mixed attention and color adapter modules in achieving coherent multi-condition fusion, resulting in more realistic, perceptually consistent, and color-faithful outputs. In contrast to plugin-based approaches (ControlNet-Garment [22], IP-Adapter-Garment [21]) that simply stack independent conditional branches, IMAGGarment yields significantly higher LLA, reflecting more precise logo placement. Our proposed $\mathrm{A}^3$ module drives these improvements, which adaptively injects spatial priors and logo features into the latent space for accurate local control. Overall, these results indicate that global-only conditioning or naive plugin stacking is insufficient for fine-grained control. By contrast, IMAGGarment provides an effective solution for multi-conditional garment synthesis, enabling precise coordination of global structure and local detail. + +Qualitative Results. Fig. 5 presents qualitative comparisons on both seen and unseen garments. Notably, the seen test set refers to the designated test split of our GarmentBench dataset. In the absence of other suitable public datasets, we assess generalization using an unseen-composition test split constructed by randomly recombining input conditions (e.g., silhouette, color, logo) into combinations that never appear during training, thereby simulating real-world fashion-design scenarios. On seen garments, subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]) reconstruct global appearance but lack spatial control. BLIP-Diffusion retains + +logo identity yet fails at precise placement due to text-only conditioning, while AnyDoor introduces logo distortions and stylistic artifacts. Plugin-based baselines (ControlNet-Garment [22], IP-Adapter-Garment [21]) treat conditions independently, resulting in poor coordination. 
ControlNet-Garment suffers from cross-condition interference, and IP-Adapter-Garment often misplaces logos despite preserving structure. In contrast, IMAGGarment achieves accurate control over silhouette, color, and logo placement. On unseen garments, all baselines degrade notably. Subject-driven methods fail to generalize to novel layouts, AnyDoor distorts appearance, and BLIP-Diffusion struggles with logo positioning. Plugin-based methods also falter: ControlNet-Garment produces mismatched outputs, and IP-Adapter-Garment cannot interpret unseen spatial semantics. IMAGGarment remains robust, maintaining alignment across all conditions. This generalization stems from our $A^3$ module, which effectively integrates spatial and visual cues in the latent space. These results validate the controllability and flexibility of our method in both seen and unseen settings. + +# D. Ablation Study + +To validate the effectiveness of each component in our framework, we design a series of ablation variants within the IMAGGarment architecture: B0 uses the vanilla Stable Diffusion v1.5 without any of our proposed modules, serving as the baseline. B1 removes the local enhancement model (Stage II), evaluating the impact of omitting logo injection and spatial control. B2 removes the global appearance model (Stage I), assessing the model's performance without structured silhouette and color conditioning. B3 removes the color adapter from the global appearance model, isolating the role of color guidance in generation. B4 replaces our mixed attention with vanilla self-attention in the denoising UNet, testing the importance of spatial fusion with silhouette features. Full represents the complete IMAGGarment framework with all proposed modules integrated. + +![](images/ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg) +Fig. 6. Qualitative ablation results on GarmentBench. The "Full" configuration achieves the best results, highlighting the importance of each component. 

Ablation of Architecture Design. Table III presents the quantitative impact of each component in our proposed IMAGGarment. In B1, which removes the local enhancement stage, the model struggles to place logos precisely, leading to degraded LLA. Although the overall garment structure is preserved, the lack of spatial control prevents accurate logo integration. In B2, without the global appearance stage, the model fails to maintain silhouette and color consistency, resulting in significantly worse FID, LPIPS, and CSS. This demonstrates that local injection alone is insufficient to handle global garment layouts. B3 disables the color adapter, causing notable drops in CSS, highlighting its role in faithful color transfer and control. B4 replaces our mixed attention with standard self-attention, which weakens the fusion of silhouette guidance and causes drops in both LPIPS and FID, indicating reduced realism and structural coherence. The full IMAGGarment achieves the best performance across all metrics, validating the effectiveness of each module's complementary design in handling multi-condition garment generation. Further, Fig. 6 shows qualitative comparisons. B1 fails to align logos spatially, while B2 produces distorted garments lacking color and silhouette guidance. Despite maintaining logo placement, B3 leads to color mismatch, and B4 generates less coherent garment layouts. In contrast, the full model successfully synthesizes garments with accurate silhouettes, precise logo placement, and faithful color reproduction, demonstrating the benefits of our dual-stage design, color adapter, and mixed attention fusion. Overall, the "Full" configuration achieves the best results, highlighting the importance of each component.

# E. More Results and Analysis

Controllability Analysis. We assess controllability by varying a single condition at a time (silhouette, color palette, or logo position) while keeping the others fixed. As shown in Fig.
7, each three column block visualizes the model's response to one condition. Changing the silhouette (left block) yields garments that match the target shapes, indicating that the mixed attention module preserves structural alignment. Varying the color palette (middle block) produces the intended color distributions, validating the color adapter for color faithful generation. Adjusting the logo position (right block) achieves precise spatial relocation, showing that the $A^3$ module effectively injects spatial priors for local control. Overall, IMAGGarment provides fine-grained and decoupled control of garment attributes suitable for practical design workflows. Non-varied attributes remain stable across manipulations, reflecting minimal cross-condition interference and consistent editing behavior. Sequential composition of edits across attributes produces similar outcomes regardless of edit order, which suggests low inter-attribute coupling. Control fidelity also holds under moderate changes of viewpoint and background, supporting robustness in real design scenarios. + +![](images/4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg) + +![](images/788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg) +Fig. 7. Controllability visualization. Each block varies one input condition while keeping others fixed. Left: Silhouette changes lead to consistent structural adaptation. Middle: Color palette variation results in accurate color transfer. Right: Logo mask adjustment yields precise spatial placement. + +![](images/39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg) + +![](images/f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg) +Fig. 8. Hyperparameter analysis of silhouette weight $\alpha$ and color weight $\beta$ . + +Hyperparameter Analysis. We study the effect of two key hyperparameters in Eq.1 and Eq.3: the silhouette guidance weight $\alpha$ and the color conditioning weight $\beta$ . From Fig. 
8, varying $\alpha$ directly impacts the model's ability to follow the reference silhouette. When $\alpha$ is too low, the generated structure becomes blurry or deviates from the target shape; when too high, it may suppress color and text guidance. We empirically set $\alpha = 0.6$ for balanced structural alignment. Similarly, the color weight $\beta$ controls the influence of the color palette. As $\beta$ increases, color consistency improves steadily, with $\beta = 1.0$ yielding the best visual fidelity. Joint sweeps over $(\alpha, \beta)$ indicate a broad stability region around $\alpha \in [0.5, 0.7]$ and $\beta \in [0.8, 1.1]$ , showing robustness to moderate mistuning. Interaction effects are mild: very large $\alpha$ slightly narrows the effective range of $\beta$ , while very large $\beta$ can oversaturate colors and reduce shading nuance. We therefore adopt $\alpha = 0.6$ and $\beta = 1.0$ throughout all experiments. + +# V. CONCLUSION + +We propose IMAGGarment, a unified conditional diffusion framework for fine-grained garment generation with precise control over silhouette, color, and logo placement. By introducing mixed attention, color adapter, and the $A^3$ module, our framework explicitly disentangles global structure (silhouette and color) from local attributes (logo content and spatial placement), enabling accurate spatial control and high-quality synthesis. To support this task, we construct GarmentBench, a large-scale benchmark with over 180K samples annotated with multi-level design conditions. Comprehensive experiments on both seen and unseen garments demonstrate that IMAGGarment achieves state-of-the-art results in structure fidelity, color consistency, and logo controllability. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment. + +# REFERENCES + +[1] Aijia Zhang, Weiqiang Jia, Qiang Zou, Yixiong Feng, Xiaoxiang Wei, and Ye Zhang. 
Diffusion-cad: Controllable diffusion model for generating computer-aided design models. IEEE Transactions on Visualization and Computer Graphics, 2025. +[2] Xiongzheng Li, Jing Huang, Jinsong Zhang, Xiaokun Sun, Haibiao Xuan, Yu-Kun Lai, Yingdi Xie, Jingyu Yang, and Kun Li. Learning to infer inner-body under clothing from monocular video. IEEE Transactions on Visualization and Computer Graphics, 29(12):5083-5096, 2022. +[3] Nannan Zhang, Zhenyu Xie, Zhengwentai Sun, Hairui Zhu, Zirong Jin, Nan Xiang, Xiaoguang Han, and Song Wu. Viton-gun: Person-to-person virtual try-on via garment unwrapping. IEEE Transactions on Visualization and Computer Graphics, 2025. +[4] Wen-Yang Zhou, Lu Yuan, Shu-Yu Chen, Lin Gao, and Shi-Min Hu. Lcnerf: Local controllable face generation in neural radiance field. IEEE Transactions on Visualization and Computer Graphics, 30(8):5437-5448, 2023. +[5] Pinaki Nath Chowdhury, Tuanfeng Wang, Duygu Ceylan, Yi-Zhe Song, and Yulia Gryaditskaya. Garment ideation: Iterative view-aware sketch-based garment modeling. In 2022 International Conference on 3D Vision (3DV), pages 22-31. IEEE, 2022. +[6] Yu Jin and Kyungho Lee. Human-ai co-creation in fashion design ideation and sketching: an empirical study. In Proceedings of IEEE/CVF Computer Vision and Pattern Recognition Conference (CVPR), CVFAD Workshop, Seattle, USA, 2024. +[7] Funda Durupynar and Ugur Gudukbay. A virtual garment design and simulation system. In 2007 11th International Conference Information Visualization (IV'07), pages 862-870. IEEE, 2007. +[8] Saikrupa PA et al. Smart stitch: A mobile app for personalized garment customization and stitching guidance. In 2025 International Conference on Data Science, Agents & Artificial Intelligence (ICDSAAI), pages 1-5. IEEE, 2025. +[9] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. 2023. 
arXiv:2307.01952. +[10] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. 2022. ArXiv:2210.02747. +[11] Wanchao Su, Hui Ye, Shu-Yu Chen, Lin Gao, and Hongbo Fu. Drawingstyles: Portrait image generation and editing with spatially conditioned stylegan. IEEE transactions on visualization and computer graphics, 29(10):4074-4088, 2022. +[12] Changjian Chen, Fei Lv, Yalong Guan, Pengcheng Wang, Shengjie Yu, Yifan Zhang, and Zhuo Tang. Human-guided image generation for expanding small-scale training image datasets. IEEE Transactions on Visualization and Computer Graphics, 2025. +[13] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. In Proceedings of the ACM SIGGRAPH Conference, pages 1–11, 2023. +[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1125-1134, 2017. +[15] Subhadeep Koley, Ayan Kumar Bhunia, Deeptanshu Sekhri, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. It's all about your sketch: Democratising sketch control in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7204-7214, 2024. +[16] Subhadeep Koley, Ayan Kumar Bhunia, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. Text-to-image diffusion models are great sketch-photo matchmakers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16826-16837, 2024. +[17] Taewook Kim, Ze Wang, Zhengyuan Yang, Jiang Wang, Lijuan Wang, Zicheng Liu, and Qiang Qiu. Conditional text-to-image generation with reference guidance. 2024. ArXiv:2411.16713. +[18] Jinghao Zhang, Wen Qian, Hao Luo, Fan Wang, and Feng Zhao. Anylogo: Symbiotic subject-driven diffusion system with gemini status. 2024. ArXiv:2409.17740. 
+[19] Mingkang Zhu, Xi Chen, Zhongdao Wang, Hengshuang Zhao, and Jiaya Jia. Logosticker: Inserting logos into diffusion models for customized generation. In Proceedings of European Conference on Computer Vision, pages 363-378, 2024. + +[20] Mingzhe Yu, Yunshan Ma, Lei Wu, Changshuo Wang, Xue Li, and Lei Meng. Fashiondpo: Fine-tune fashion outfit generation model using direct preference optimization. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 212-222, 2025. +[21] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. 2023. ArXiv:2308.06721. +[22] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. +[23] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. +[24] Shu-Yu Chen, Wanchao Su, Lin Gao, Shihong Xia, and Hongbo Fu. Deepfacedrawing: deep generation of face images from sketches. ACM Transactions on Graphics, 39(4), August 2020. +[25] Shu-Yu Chen, Feng-Lin Liu, Yu-Kun Lai, Paul L. Rosin, Chunpeng Li, Hongbo Fu, and Lin Gao. Deepfaceediting: deep face generation and editing with disentangled geometry and appearance control. ACM Transactions on Graphics, 40(4), July 2021. +[26] Xian Wu, Chen Wang, Hongbo Fu, Ariel Shamir, Song-Hai Zhang, and Shi-Min Hu. Deepportraitdrawing: Generating human body images from freehand sketches, 2022. ArXiv:2205.02070. +[27] Arnab Ghosh, Richard Zhang, Puneet K Dokania, Oliver Wang, Alexei A Efros, Philip HS Torr, and Eli Shechtman. Interactive sketch & fill: Multiclass sketch-to-image translation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1171-1180, 2019. +[28] Wengling Chen and James Hays. Sketchygan: Towards diverse and realistic sketch to image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 9416-9425, 2018. +[29] Zeyu Li, Cheng Deng, Erkun Yang, and Dacheng Tao. Staged sketch-to-image synthesis via semi-supervised generative adversarial networks. IEEE Transactions on Multimedia, 23:2694-2705, 2020. +[30] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE Signal Processing Magazine, 35(1):53-65, 2018. +[31] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. 2014. ArXiv:1411.1784. +[32] Yifang Men, Yiming Mao, Yuning Jiang, Wei-Ying Ma, and Zhouhui Lian. Controllable person image synthesis with attribute-decomposed gan. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 5084-5093, 2020. +[33] Yifan Liu, Zengchang Qin, Zhenbo Luo, and Hua Wang. Auto-painter: Cartoon image generation from sketch by using conditional generative adversarial networks. 2017. ArXiv:1705.01908. +[34] Yuanzheng Ci, Xinzhu Ma, Zhihui Wang, Haojie Li, and Zhongxuan Luo. User-guided deep anime line art colorization with conditional adversarial networks. In Proceedings of the 26th ACM International Conference on Multimedia, page 1536-1544, 2018. +[35] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017. +[36] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. In Proceedings of the Conference on Neural Information Processing Systems, page 405-415, 2017. +[37] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. 
Deformable gans for pose-based human image generation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3408-3416, 2018. +[38] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proceedings of the Conference on Neural Information Processing Systems, 2020. +[39] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. 2020. ArXiv:2011.13456. +[40] Junyao Gao, Yanan Sun, Fei Shen, Xin Jiang, Zhening Xing, Kai Chen, and Cairong Zhao. Faceshot: Bring any character into life. 2025. ArXiv:2503.00740. +[41] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. Proceedings of the Conference on Neural Information Processing Systems, 36:30146-30166, 2023. + +[42] Fei Shen, Xin Jiang, Xin He, Hu Ye, Cong Wang, Xiaoyu Du, Zechao Li, and Jinhui Tang. Imagdressing-v1: Customizable virtual dressing. 2024. ArXiv:2407.12705. +[43] Ente Lin, Xujie Zhang, Fuwei Zhao, Yuxuan Luo, Xin Dong, Long Zeng, and Xiaodan Liang. Dreamfit: Garment-centric human generation via a lightweight anything-dressing encoder. 2024. ArXiv:2412.17644. +[44] Weifeng Chen, Tao Gu, Yuhao Xu, and Arlene Chen. Magic clothing: Controllable garment-driven image synthesis. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6939-6948, 2024. +[45] Yuhao Xu, Tao Gu, Weifeng Chen, and Chengcai Chen. Ootdiffusion: Outfitting fusion based latent diffusion for controllable virtual try-on. 2024. ArXiv:2403.01779. +[46] Xujie Zhang, Binbin Yang, Michael C Kampffmeyer, Wenqing Zhang, Shiyue Zhang, Guansong Lu, Liang Lin, Hang Xu, and Xiaodan Liang. Diffcloth: Diffusion based garment synthesis and manipulation via structural cross-modal semantic alignment. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23154-23163, 2023. +[47] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. 2022. ArXiv:2207.12598. +[48] Muhammad Hussain. Yolov5, yolov8 and yolov10: The go-to detectors for real-time vision, 2024. ArXiv:2407.02988. +[49] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond, 2023. ArXiv:2308.12966. +[50] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7915-7925, 2022. +[51] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. ArXiv:1512.03385. +[52] J. MacQueen. Some methods for classification and analysis of multivariate observations. In Proceedings of the 5th Berkeley Symposium on Mathematical Statistics and Probability, pages 281-297, 1967. +[53] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Proceedings of the Conference on Neural Information Processing Systems, 30, 2017. +[54] Kai Zeng, Zhou Wang, Anmin Zhang, Zhaohui Wang, and Wenjun Zhang. A color structural similarity index for image quality assessment. In Proceedings of the IEEE International Conference on Image Processing (ICIP), pages 660-664, 2014. +[55] Masato Fujitake. Rl-logo: Deep reinforcement learning localization for logo recognition. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2830-2834. IEEE, 2024. +[56] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. 
The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018. +[57] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. ArXiv:1711.05101. +[58] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. ArXiv:2010.02502. +[59] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024. \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13176/images/00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg b/data/2025/2504_13xxx/2504.13176/images/00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90fc048da195cb1df5f552d92ec3e02f457e62aa --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cc83026bd3ee7cce36bce17e44704d0632e9b15b3626dd1561bfaae6845deb8 +size 45203 diff --git a/data/2025/2504_13xxx/2504.13176/images/0816818dd69391867c1691bd9d8e9eaaba2592bff23060e93b57356b9fac22f7.jpg b/data/2025/2504_13xxx/2504.13176/images/0816818dd69391867c1691bd9d8e9eaaba2592bff23060e93b57356b9fac22f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42b165918ec6e0819de28d7d02d66d7f8d95976a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/0816818dd69391867c1691bd9d8e9eaaba2592bff23060e93b57356b9fac22f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d2de002cc8a46cc375f230d337a8c48b9b3b6a2ea7abc895ad5c61b84d0e209 +size 6252 diff --git 
a/data/2025/2504_13xxx/2504.13176/images/0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg b/data/2025/2504_13xxx/2504.13176/images/0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c77ea0fe7c0d96cb29f4075644f8760be9ef2be --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:790834d64358ebc0bfb1d994cddfea8dfc823242db6f3dd345988495ff75dbe2 +size 15671 diff --git a/data/2025/2504_13xxx/2504.13176/images/1ee2571ac0b53d09bb1435f8aa3ce3aa1cd081b0f40908cec27aed899cd2e78e.jpg b/data/2025/2504_13xxx/2504.13176/images/1ee2571ac0b53d09bb1435f8aa3ce3aa1cd081b0f40908cec27aed899cd2e78e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0ea4c0adb931f51fed1dd191fa43a0b811ae6fb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/1ee2571ac0b53d09bb1435f8aa3ce3aa1cd081b0f40908cec27aed899cd2e78e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac6b51b7d3c0a8569c86b042b6ba8141195213945dce94e12968454053a231da +size 9646 diff --git a/data/2025/2504_13xxx/2504.13176/images/326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg b/data/2025/2504_13xxx/2504.13176/images/326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71e9d1a2c965a031650cbe1ff2392d4149e25941 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50279a411a9a8b3463a9e18e1c21456f9533860635d1775e6c76dae7360d3558 +size 11668 diff --git a/data/2025/2504_13xxx/2504.13176/images/335dbfdc286a54b2bc0d2c1f0b09e7e683b15fb70e2cb7e562b37c3930e49a06.jpg 
b/data/2025/2504_13xxx/2504.13176/images/335dbfdc286a54b2bc0d2c1f0b09e7e683b15fb70e2cb7e562b37c3930e49a06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b97b694f0a01bb1b877c24f42c04f021a71734f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/335dbfdc286a54b2bc0d2c1f0b09e7e683b15fb70e2cb7e562b37c3930e49a06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5398741d75e0db2c5589c2817ac9e022e65278a69054a8346a308ea29ad3d3e4 +size 7065 diff --git a/data/2025/2504_13xxx/2504.13176/images/380582041820ddac62131e41cfc36154d411b7799852e3e145e7ca28d41e33bc.jpg b/data/2025/2504_13xxx/2504.13176/images/380582041820ddac62131e41cfc36154d411b7799852e3e145e7ca28d41e33bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a833135338f0cce4fcb61696ae8227f674434f42 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/380582041820ddac62131e41cfc36154d411b7799852e3e145e7ca28d41e33bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffc0cb64c20432ac39ad9adff8b0f3bc4e2026ae2db2cf85f3f2ef1279a757fb +size 48616 diff --git a/data/2025/2504_13xxx/2504.13176/images/382b919c432f4d91a756ad7b59aeee5ad88c01678449e8ec4d62479a1835bc33.jpg b/data/2025/2504_13xxx/2504.13176/images/382b919c432f4d91a756ad7b59aeee5ad88c01678449e8ec4d62479a1835bc33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..680eb11f40a6fb56a13f17bdb2e7b04c74bfda5f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/382b919c432f4d91a756ad7b59aeee5ad88c01678449e8ec4d62479a1835bc33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:685742f334f4297289f7e878a79ae7e559fa441355b931fe9162b919b89d4d97 +size 5880 diff --git a/data/2025/2504_13xxx/2504.13176/images/39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg b/data/2025/2504_13xxx/2504.13176/images/39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5629a743133c55cbb6c4bef034649375f777b261 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa83577daabf46f55c0a86fae0c4bb1d9020deffa955406309853bb090de9d5f +size 32640 diff --git a/data/2025/2504_13xxx/2504.13176/images/3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg b/data/2025/2504_13xxx/2504.13176/images/3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8e6bf803cec15cd6fe49e136a8c2698f1db80a0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc850bae02bd8fe74a8c0fbd1de93f79daaa4df7408b7ce4a5cce2074a67577c +size 7679 diff --git a/data/2025/2504_13xxx/2504.13176/images/4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg b/data/2025/2504_13xxx/2504.13176/images/4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff2397153fd6b47238a4c3e2f716bc9517222898 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:631ea2f1b07beb2199dad525257f4f33a7d198afa97f2011f4021b8f78568fc1 +size 35931 diff --git a/data/2025/2504_13xxx/2504.13176/images/4d65aa6ae5367016dbf9357c4a96d0e34eb03e34a38102f66c3219c2e1d833d1.jpg b/data/2025/2504_13xxx/2504.13176/images/4d65aa6ae5367016dbf9357c4a96d0e34eb03e34a38102f66c3219c2e1d833d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd57eb34db6f86828a0d6bbfbad050005b89f284 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13176/images/4d65aa6ae5367016dbf9357c4a96d0e34eb03e34a38102f66c3219c2e1d833d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c38b9fd113a5dfc2be14db4b0552623271d0b365b4f8a8303bbb81b93e449b35 +size 5401 diff --git a/data/2025/2504_13xxx/2504.13176/images/58721a9e3534a0b017199e56a3e4e58e53b4032c76a821b5b8918bf453eafec2.jpg b/data/2025/2504_13xxx/2504.13176/images/58721a9e3534a0b017199e56a3e4e58e53b4032c76a821b5b8918bf453eafec2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61821c71a35a9d889082de1498fdf81d77aeb05c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/58721a9e3534a0b017199e56a3e4e58e53b4032c76a821b5b8918bf453eafec2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a20c5e9b59438236d1abe3dc21fd6b345ac1b83b5c5ee2a89764651dec4434fc +size 3559 diff --git a/data/2025/2504_13xxx/2504.13176/images/58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg b/data/2025/2504_13xxx/2504.13176/images/58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51bc8755128fbb35fa8824af6ef611358d5f9acc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0a76d1a533a08df9b8f082228e28ab89b8e7da68c1f2e70bf10857c6024f3f5 +size 39529 diff --git a/data/2025/2504_13xxx/2504.13176/images/5b74d79f3644eb1654f74a81b1adcdf5d1917432debbdacbab4166a6a8dba118.jpg b/data/2025/2504_13xxx/2504.13176/images/5b74d79f3644eb1654f74a81b1adcdf5d1917432debbdacbab4166a6a8dba118.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d58b4cd7e836e0749f8fc2ae45c2d25f6f727051 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/5b74d79f3644eb1654f74a81b1adcdf5d1917432debbdacbab4166a6a8dba118.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:608b8517842c8a1ecde2fbe81b84669388fe885e27dc7ffb38c4e715e1394245 +size 5049 diff --git a/data/2025/2504_13xxx/2504.13176/images/61e30aa29addae63474715749ba0ffbf5c696de92b586801186800fc3726cf11.jpg b/data/2025/2504_13xxx/2504.13176/images/61e30aa29addae63474715749ba0ffbf5c696de92b586801186800fc3726cf11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..317b259810288146cd79df8f6e5ef669c52807f2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/61e30aa29addae63474715749ba0ffbf5c696de92b586801186800fc3726cf11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6336ea798d01c8ed3c461224e91ba45f16021a539c589072138b37ebb8af231d +size 25258 diff --git a/data/2025/2504_13xxx/2504.13176/images/6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg b/data/2025/2504_13xxx/2504.13176/images/6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0f63e55504eead6f6399c10f2c9caff0de5e18b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a265ba0e6b3d4e7561f96c22d625e85b6f9e1c33b2670724b1fd6417247b475b +size 19461 diff --git a/data/2025/2504_13xxx/2504.13176/images/64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg b/data/2025/2504_13xxx/2504.13176/images/64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d9c8665f57e825ad26925ff7325d9d9bb1ee5e5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc65ad699c4ae3e4ca4d305dac896999bf8597184982469aa1158105113ab944 +size 31777 diff --git 
a/data/2025/2504_13xxx/2504.13176/images/66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg b/data/2025/2504_13xxx/2504.13176/images/66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29c0be753b9a688ea15f3b6b17bb3736439efe41 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b37e4e4ff4c00bcca9b3a538990376354a8236727ab3d45e3302b96826382bb +size 11992 diff --git a/data/2025/2504_13xxx/2504.13176/images/6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg b/data/2025/2504_13xxx/2504.13176/images/6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d297720614be391e22d2428830f74da3227ceab3 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a89a5ce00f016f1593bf0c3dffe25bc69ffe89ce2c6f21091a2d222ca514983a +size 36190 diff --git a/data/2025/2504_13xxx/2504.13176/images/788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg b/data/2025/2504_13xxx/2504.13176/images/788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d90d7080ded682bc90eda5d6093fd68e1e05f3d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ec1b104f29070dfb21b307a4e1fe53823c4af607c0ddb00fe687d43a14aba33 +size 28854 diff --git a/data/2025/2504_13xxx/2504.13176/images/7cad529b8425d2abd1a82594116dc16ce40f1b2c1f8e922aa5d74b85b80f5814.jpg 
b/data/2025/2504_13xxx/2504.13176/images/7cad529b8425d2abd1a82594116dc16ce40f1b2c1f8e922aa5d74b85b80f5814.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99802463ceef97a68dc4fbb03f9dbf421ae427da --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/7cad529b8425d2abd1a82594116dc16ce40f1b2c1f8e922aa5d74b85b80f5814.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a047df111f92241d41c6612f9b6a72ff56d3aeef806cdbe9cb6986d8d7c7bb56 +size 9388 diff --git a/data/2025/2504_13xxx/2504.13176/images/834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg b/data/2025/2504_13xxx/2504.13176/images/834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af43df20c2e1ae86121643d4a901613b720d4653 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfa7b942bbe2f1bf1a1147573becda9d5573938ff07925253c617493a2a17aa1 +size 15119 diff --git a/data/2025/2504_13xxx/2504.13176/images/87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg b/data/2025/2504_13xxx/2504.13176/images/87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg new file mode 100644 index 0000000000000000000000000000000000000000..469caf1ddf77af55484d21755c3641e265105cce --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f349f0b54dc19675a0507bc15e200bfa08e875c888351cb3a915f4c63af28a6 +size 10538 diff --git a/data/2025/2504_13xxx/2504.13176/images/88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg b/data/2025/2504_13xxx/2504.13176/images/88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4f2feb1eaae71a41ae8678558362c1b700183133 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1374af19baad9274cbb84fe24bbaf67f9bee4a49a775c19f9184d4274c0a707 +size 65571 diff --git a/data/2025/2504_13xxx/2504.13176/images/8df82528834fc4a351d35f88f4433fb6cd1c95ff062a08c153f3d1f479e21123.jpg b/data/2025/2504_13xxx/2504.13176/images/8df82528834fc4a351d35f88f4433fb6cd1c95ff062a08c153f3d1f479e21123.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2db43d8260ccf0339ab74895c265a4439f1660f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/8df82528834fc4a351d35f88f4433fb6cd1c95ff062a08c153f3d1f479e21123.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3f438f194c0a8cbcf17f287c8ab5e06b2cb653cef49629fcf990948fb480bb8 +size 28556 diff --git a/data/2025/2504_13xxx/2504.13176/images/95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg b/data/2025/2504_13xxx/2504.13176/images/95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35bd1abd9e3e697e0947aa64637a5d4e59fca890 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a76f9b0cefafadd439fd845046afd9d86fe111ecf22c139988a2f3c41a5416 +size 9966 diff --git a/data/2025/2504_13xxx/2504.13176/images/9c18b45e307d8cd2513062aa4be434f3ee3c8311dc589d7a63ec57b42d24511f.jpg b/data/2025/2504_13xxx/2504.13176/images/9c18b45e307d8cd2513062aa4be434f3ee3c8311dc589d7a63ec57b42d24511f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64f670c96afe403442f0f4f437de84b07cc7b7e0 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13176/images/9c18b45e307d8cd2513062aa4be434f3ee3c8311dc589d7a63ec57b42d24511f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91aefa4bb0081e775acd194644928aed41182fc42abe63b7220aea5b9163f1c6 +size 6750 diff --git a/data/2025/2504_13xxx/2504.13176/images/a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg b/data/2025/2504_13xxx/2504.13176/images/a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65a2bca42d911abcd9064082bd37e72718042262 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1dce03a92df78e04b14a158ad0c0b0feeb3da32a00dd4a22bf8b65d966b2be5 +size 10230 diff --git a/data/2025/2504_13xxx/2504.13176/images/a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg b/data/2025/2504_13xxx/2504.13176/images/a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cdbc1c6e1c2007df7aab89bec483f7ad0c452cf --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f37ade3b5acb3b325b1a5d3e606b5a567b49b8c0954b781ce4e767c23cf146d +size 34875 diff --git a/data/2025/2504_13xxx/2504.13176/images/aa2f2a8ae8e9a8d33b674723dccc45e54091ec86bb8aac4563e39acbdc86a1d9.jpg b/data/2025/2504_13xxx/2504.13176/images/aa2f2a8ae8e9a8d33b674723dccc45e54091ec86bb8aac4563e39acbdc86a1d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6f224ef59d4879198045b07e931ebf053e94a07 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/aa2f2a8ae8e9a8d33b674723dccc45e54091ec86bb8aac4563e39acbdc86a1d9.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a9098840b53a8ba67b72b3568251ecc4ea2228f2dd31deca0bab1dd9d0003838 +size 8559 diff --git a/data/2025/2504_13xxx/2504.13176/images/c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg b/data/2025/2504_13xxx/2504.13176/images/c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fadf25f2dbabf74729920f3032a8607500cc42c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37650fbe7d2f7be18a49a030056d9f021996914c44c69719fddf5b211925f197 +size 45218 diff --git a/data/2025/2504_13xxx/2504.13176/images/cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg b/data/2025/2504_13xxx/2504.13176/images/cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9991e28c35282af2b6a99d1f877ed12bbaaba6c0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52bcaf408ef3ea24647b24310dfd278f514a240b8d7bca411fce5b70e35cb0b4 +size 16604 diff --git a/data/2025/2504_13xxx/2504.13176/images/cdd497e7ffd20245df054abb3d36a3472a9d0bea12ff74109a4efe6fc01bb38f.jpg b/data/2025/2504_13xxx/2504.13176/images/cdd497e7ffd20245df054abb3d36a3472a9d0bea12ff74109a4efe6fc01bb38f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e77198c312542c0c7b295aae7259119f6f89a29e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/cdd497e7ffd20245df054abb3d36a3472a9d0bea12ff74109a4efe6fc01bb38f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924a8664df439b43f65016e4207bafe8237fbddc5936e5bd45a06331b286cad1 +size 5544 diff --git 
a/data/2025/2504_13xxx/2504.13176/images/d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg b/data/2025/2504_13xxx/2504.13176/images/d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d0b2b30d7919efdf6fa37460f4dad1c272a6815 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56e3799c9be5a611cdc838cb0030675d93c73a0f4932e4ee7cc838c7f0135f65 +size 63654 diff --git a/data/2025/2504_13xxx/2504.13176/images/d60da471dd8ed888ff57dc0cd6451794dd08310fe0916a3123d3bab2c314cb1a.jpg b/data/2025/2504_13xxx/2504.13176/images/d60da471dd8ed888ff57dc0cd6451794dd08310fe0916a3123d3bab2c314cb1a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e146d0cf89ec7ae1738e08689d70661ea19cf14b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/d60da471dd8ed888ff57dc0cd6451794dd08310fe0916a3123d3bab2c314cb1a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54f80258f21df6e4fbd3f4e408e7d0015801b84b2a2b5650fe3b65c7e51debc8 +size 9973 diff --git a/data/2025/2504_13xxx/2504.13176/images/ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg b/data/2025/2504_13xxx/2504.13176/images/ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e27a8ff22d4f5ae943f52a3f467129e418dacbbb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0436fb30bf2a5a274f277e1c8709f7502314936314ca45f80a16e44b3e4118a +size 147065 diff --git a/data/2025/2504_13xxx/2504.13176/images/ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg 
b/data/2025/2504_13xxx/2504.13176/images/ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae931bd2c93b023221d946792ed8f34bb634a7f2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b7fb02392b1a2e71a8786fe49f24af280e75a97bc13585f81f501a2cf6f94fc +size 35946 diff --git a/data/2025/2504_13xxx/2504.13176/images/f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg b/data/2025/2504_13xxx/2504.13176/images/f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..774eca716928341fc2426c79334461bd51aea408 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/images/f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:731771a5557a01f254560ee667cad290d6c89f496ec431a8d07a28a2d0ae1255 +size 71310 diff --git a/data/2025/2504_13xxx/2504.13176/layout.json b/data/2025/2504_13xxx/2504.13176/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..37951d23b6625d7c1298d4273eb545dfe9147862 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13176/layout.json @@ -0,0 +1,10126 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 56, + 56, + 553, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 56, + 553, + 111 + ], + "spans": [ + { + "bbox": [ + 56, + 56, + 553, + 111 + ], + "type": "text", + "content": "IMAGGarment: Fine-Grained Garment Generation for Controllable Fashion Design" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 117, + 519, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 117, + 519, + 130 + ], + "spans": [ + { + "bbox": [ + 86, + 117, + 519, + 130 + ], + 
"type": "text", + "content": "Fei Shen, Jian Yu, Cong Wang, Xin Jiang, Xiaoyu Du, and Jinhui Tang, Senior Member, IEEE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 175, + 301, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 175, + 301, + 416 + ], + "spans": [ + { + "bbox": [ + 45, + 175, + 301, + 416 + ], + "type": "text", + "content": "Abstract—This paper presents IMAGGarment, a fine-grained garment generation (FGG) framework that enables high-fidelity garment synthesis with precise control over silhouette, color, and logo placement. Unlike existing methods that are limited to single-condition inputs, IMAGGarment addresses the challenges of multi-conditional controllability in personalized fashion design and digital apparel applications. Specifically, IMAGGarment employs a two-stage training strategy to separately model global appearance and local details, while enabling unified and controllable generation through end-to-end inference. In the first stage, we propose a global appearance model that jointly encodes silhouette and color using a mixed attention module and a color adapter. In the second stage, we present a local enhancement model with an adaptive appearance-aware module to inject user-defined logos and spatial constraints, enabling accurate placement and visual consistency. To support this task, we release GarmentBench, a large-scale dataset comprising over 180K garment samples paired with multi-level design conditions, including sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that our method outperforms existing baselines, achieving superior structural stability, color fidelity, and local controllability performance. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 419, + 301, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 419, + 301, + 451 + ], + "spans": [ + { + "bbox": [ + 45, + 419, + 301, + 451 + ], + "type": "text", + "content": "Index Terms—Fine-Grained Garment Generation, Multi-Conditional Generation, Fashion Design Applications, Garment-Bench Dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 469, + 215, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 469, + 215, + 480 + ], + "spans": [ + { + "bbox": [ + 132, + 469, + 215, + 480 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 485, + 300, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 485, + 300, + 556 + ], + "spans": [ + { + "bbox": [ + 45, + 485, + 300, + 556 + ], + "type": "text", + "content": "Fine-Grained garment generation (FGG) aims to synthesize high-quality garments with precise control over garment silhouette, color scheme, logo content, and spatial placement. As personalized fashion and the digital apparel market grow rapidly, fine-grained controllability [1]–[4] is increasingly crucial for applications in fashion design and e-commerce." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 557, + 301, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 557, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 45, + 557, + 301, + 594 + ], + "type": "text", + "content": "In traditional garment ideation [5], [6] and visualization [7], [8], designers analyze line drawings to establish silhouette and construction, then select color palettes and materials, and" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 603, + 299, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 603, + 299, + 639 + ], + "spans": [ + { + "bbox": [ + 45, + 603, + 299, + 639 + ], + "type": "text", + "content": "Fei Shen is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the NExT++ Research Centre, National University of Singapore, Singapore, e-mail: shenfei29@nus.edu.sg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 639, + 301, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 639, + 301, + 675 + ], + "spans": [ + { + "bbox": [ + 45, + 639, + 301, + 675 + ], + "type": "text", + "content": "Jian Yu, Xin Jiang, and Xiaoyu Du are with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China. e-mail: jianyu@njust.edu.cn; xinjiang@njust.edu.cn; duxy@njust.edu.cn." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 675, + 301, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 675, + 301, + 702 + ], + "spans": [ + { + "bbox": [ + 45, + 675, + 301, + 702 + ], + "type": "text", + "content": "Cong Wang is with the State Key Laboratory for Novel Software Technology and the School of Computer Science, Nanjing University, Nanjing, 210023, China. 
e-mail: cw@smail.nju.edu.cn" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 702, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 702, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 702, + 301, + 748 + ], + "type": "text", + "content": "Jinhui Tang is with the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, 210094, China, and also with the College of Information Science and Technology and Artificial Intelligence, Nanjing Forestry University, Nanjing 210037, China, e-mail: jinhuitang@njust.edu.cn. (Corresponding author: Jinhui Tang.)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 174, + 564, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 564, + 317 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 564, + 317 + ], + "type": "text", + "content": "finally arrange brand elements such as logos and trims. This manual workflow has two persistent drawbacks. First, it is time consuming: to match the specification, edits must be applied object by object and view by view; in a seasonal collection, even identical panels within the same board are recolored or relabeled one at a time, which does not scale. Second, it is error prone and inconsistent: small deviations in hue, shading, or logo placement arise across artists and rounds of revision, yielding mismatches across styles, sizes, and camera viewpoints. As project scope grows, these issues inflate turnaround time and complicate quality control and version management." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 318, + 564, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 318, + 564, + 604 + ], + "spans": [ + { + "bbox": [ + 307, + 318, + 564, + 604 + ], + "type": "text", + "content": "Recently, image synthesis [9]–[12] has made notable progress in tasks such as sketch-to-image generation [13]–[16] and logo insertion [17]–[19] (as illustrated in Fig. 1 (a)), demonstrating basic capabilities in structural and content-level control. However, these tasks [13], [17], [20] provide only coarse guidance and rely on single-condition inputs (e.g., sketch or color), lacking the fine-grained controllability needed to model the nuanced interactions between global structure and local details in garment design. Although sequential or modular combinations may offer partial solutions, they [21]–[23] fail to explicitly disentangle and jointly model global attributes (e.g., silhouette, color) and local appearance details (e.g., logo content and spatial placement). Without unified control mechanisms, these approaches [21]–[23] often suffer from condition entanglement, conflicting objectives, and visual inconsistencies, ultimately falling short of the high standards required in real-world fashion design. In contrast, practical fashion design [5], [6] requires joint control over multiple interdependent factors: designers determine global attributes such as silhouette and color, followed by fine-tuning of local elements like logos and their placement. To support this process, a unified generation task that clearly separates and coordinates global and local attributes is essential for controllable and high-fidelity synthesis." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 605, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 605, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 605, + 564, + 748 + ], + "type": "text", + "content": "To address these limitations, we propose a new task: fine-grained garment generation (FGG), as illustrated in Fig. 1 (b). FGG is formulated as a unified multi-conditional garment synthesis task, taking a textual prompt, garment silhouette, color palette, and spatially constrained logos as joint inputs. It aims to generate garments that faithfully reflect high-level structural intent and fine-grained local styling cues. FGG is specifically designed to mirror real-world fashion workflows, where designers must coordinate diverse input modalities to express creative intent. Unlike conventional approaches that process each condition independently or sequentially, FGG emphasizes joint modeling and hierarchical reasoning across" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "text", + "content": "arXiv:2504.13176v2 [cs.CV] 8 Sep 2025" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 45, + 179, + 156 + ], + "blocks": [ + { + "bbox": [ + 61, + 45, + 179, + 156 + ], + "lines": [ + { + "bbox": [ + 61, + 45, + 179, + 156 + ], + "spans": [ + { + "bbox": [ + 61, + 45, + 179, + 156 + ], + "type": "image", + "image_path": "0f5fef9dcc1b1ccc41c901842a791c5804778bc2b138eee313ea6ae6355ba813.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 194, + 45, + 304, + 156 + ], + "blocks": [ + { + "bbox": [ + 194, + 45, + 304, + 156 + ], + "lines": [ + { + "bbox": [ + 194, + 45, + 304, + 156 + ], + "spans": [ + { + "bbox": [ + 194, + 45, + 304, + 156 + ], + "type": "image", + "image_path": "834d6a64f5df6d2fb0ff04e31586fc15aec16332f5b005e9fecf16759317588f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 61, + 178, + 304, + 350 + ], + "blocks": [ + { + "bbox": [ + 91, + 164, + 261, + 176 + ], + "lines": [ + { + "bbox": [ + 91, + 164, + 261, + 176 + ], + "spans": [ + { + "bbox": [ + 91, + 164, + 261, + 176 + ], + "type": "text", + "content": "(a) Sketch-to-image and logo insertion task" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": 
"image_caption" + }, + { + "bbox": [ + 61, + 178, + 304, + 350 + ], + "lines": [ + { + "bbox": [ + 61, + 178, + 304, + 350 + ], + "spans": [ + { + "bbox": [ + 61, + 178, + 304, + 350 + ], + "type": "image", + "image_path": "ee977948bf238d1af469fbcbf6fb6da7daa33661a9b3df5feb126e4f4da014d3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 206, + 353, + 419, + 365 + ], + "lines": [ + { + "bbox": [ + 206, + 353, + 419, + 365 + ], + "spans": [ + { + "bbox": [ + 206, + 353, + 419, + 365 + ], + "type": "text", + "content": "(c) Generalization capability in real-world applications" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 325, + 44, + 539, + 164 + ], + "blocks": [ + { + "bbox": [ + 325, + 44, + 539, + 164 + ], + "lines": [ + { + "bbox": [ + 325, + 44, + 539, + 164 + ], + "spans": [ + { + "bbox": [ + 325, + 44, + 539, + 164 + ], + "type": "image", + "image_path": "6200d8f841819a2ebd4f5e5c474a2ce4d32cfcbf74b8dfbe03fdb8337da294c7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 165, + 509, + 176 + ], + "lines": [ + { + "bbox": [ + 350, + 165, + 509, + 176 + ], + "spans": [ + { + "bbox": [ + 350, + 165, + 509, + 176 + ], + "type": "text", + "content": "(b) Fine-grained garment generation task" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 324, + 178, + 550, + 350 + ], + "blocks": [ + { + "bbox": [ + 324, + 178, + 550, + 350 + ], + "lines": [ + { + "bbox": [ + 324, + 178, + 550, + 350 + ], + "spans": [ + { + "bbox": [ + 324, + 178, + 550, + 350 + ], + "type": "image", + "image_path": "58d0650f24a575b2f82468b4fa675a9645b69d7fdb4dea11c4f4085cc207d7c3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 374, + 565, + 403 + ], + "lines": [ + { + "bbox": [ 
+ 45, + 374, + 565, + 403 + ], + "spans": [ + { + "bbox": [ + 45, + 374, + 565, + 403 + ], + "type": "text", + "content": "Fig. 1. Comparison of (a) existing sketch-to-image and logo insertion tasks with (b) our proposed fine-grained garment generation (FGG) task, which enables precise and controllable synthesis of garment structure, color, logo, and spatial placement. Unlike previous tasks that rely on a single input condition, FGG is tailored for real-world fashion design workflows by integrating multiple conditional controls." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 423, + 301, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 423, + 301, + 579 + ], + "spans": [ + { + "bbox": [ + 45, + 423, + 301, + 579 + ], + "type": "text", + "content": "input types. It goes beyond simple task combinations by enforcing consistent integration of global and local attributes within a unified generation framework, enabling nuanced control over the overall structure and detailed appearance. Specifically, FGG task introduces three key challenges: (1) maintaining visual and semantic consistency across heterogeneous input conditions, (2) resolving conflicts between global structures and localized visual elements, and (3) generalizing to unseen condition combinations without retraining (see Fig. 1(c)). FGG thus marks a fundamental shift from single-condition or loosely coupled pipelines toward a unified, design-intent-driven generation paradigm that better reflects the complexity of real-world garment design." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 581, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 581, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 581, + 301, + 749 + ], + "type": "text", + "content": "To this end, we propose IMAGGarment, a two-stage training and end-to-end inference framework tailored for fine-grained garment generation. Unlike prior methods that rely on single-condition inputs or simple feature fusion, our framework is explicitly designed to achieve fine-grained controllability under multiple, interdependent constraints. In the first stage, we propose a global appearance model with a mixed attention module and a color adapter to jointly encode garment silhouette and color palette, improving overall appearance fidelity and mitigating condition entanglement. In the second stage, we present a local enhancement model equipped with an adaptive appearance-aware module to inject user-defined logos and their spatial constraints, enabling precise logo placement while preserving global consistency. To further promote research in" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 423, + 564, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 423, + 564, + 518 + ], + "spans": [ + { + "bbox": [ + 307, + 423, + 564, + 518 + ], + "type": "text", + "content": "this direction, we release GarmentBench, a large-scale dataset comprising over 180k garment samples annotated with rich multi-level design conditions, including silhouette sketches, color references, logo placements, and textual prompts. Extensive experiments demonstrate that IMAGGarment significantly outperforms existing baselines in terms of structural stability and local controllability. 
To summarize, the main contributions are listed as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 520, + 564, + 663 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 318, + 520, + 564, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 520, + 564, + 567 + ], + "spans": [ + { + "bbox": [ + 318, + 520, + 564, + 567 + ], + "type": "text", + "content": "- We propose IMAGGarment, a controllable garment generation framework that enables precise control over garment structure, color, and logo placement, addressing the challenges of FGG." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 319, + 568, + 564, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 568, + 564, + 616 + ], + "spans": [ + { + "bbox": [ + 319, + 568, + 564, + 616 + ], + "type": "text", + "content": "- We design a mixed attention module, color adapter, and adaptive appearance-aware module to disentangle global structure from local attributes, achieving fine-grained visual control and accurate spatial control." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 319, + 616, + 564, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 616, + 564, + 663 + ], + "spans": [ + { + "bbox": [ + 319, + 616, + 564, + 663 + ], + "type": "text", + "content": "- We release GarmentBench, a large-scale dataset with diverse garments and rich multi-conditional annotations, serving as a valuable benchmark for controllable garment generation research." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 308, + 665, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 665, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 665, + 564, + 749 + ], + "type": "text", + "content": "The remainder of this paper is organized as follows. 
Section II surveys prior work on garment generation, encompassing GAN-based techniques and diffusion-based controllable generation. Section III describes the proposed IMAGGarment methodology, comprising a global appearance model with mixed attention and a color adapter, a local enhancement model with the A3 module, and the associated training and" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 104 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 104 + ], + "type": "text", + "content": "inference strategies. Section IV presents the experimental protocol and results, including the GarmentBench dataset and evaluation metrics, implementation details, and results and analysis. Section V concludes the paper." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 114, + 219, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 114, + 219, + 125 + ], + "spans": [ + { + "bbox": [ + 128, + 114, + 219, + 125 + ], + "type": "text", + "content": "II. 
RELATED WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 130, + 149, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 130, + 149, + 141 + ], + "spans": [ + { + "bbox": [ + 45, + 130, + 149, + 141 + ], + "type": "text", + "content": "A. GAN-Based Methods" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 144, + 301, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 144, + 301, + 337 + ], + "spans": [ + { + "bbox": [ + 45, + 144, + 301, + 337 + ], + "type": "text", + "content": "Early approaches [24]–[29] to garment generation predominantly build on generative adversarial networks (GANs) [30]–[32], with a major line devoted to sketch-to-image translation [33] that learns spatial mappings from structural cues. Representative systems such as DeepFaceDrawing [24] and DeepFaceEditing [25] decompose sketches into semantic components and progressively assemble photorealistic results, while DeepPortraitDrawing [26] extends this paradigm to full-body synthesis via local-to-global pipelines. Interactive frameworks [27] further introduce gating mechanisms for user-guided editing, and DALColor [34] combines WGAN-GP [35] with line-art colorization for refined appearance control. Beyond sketches, related GAN-based efforts explore pose- or part-guided generation [36], [37], leveraging learned warping or deformable alignment to better propagate structural constraints from sources to targets." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 336, + 302, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 336, + 302, + 482 + ], + "spans": [ + { + "bbox": [ + 45, + 336, + 302, + 482 + ], + "type": "text", + "content": "However, these methods [24]–[27] are largely restricted to single-condition settings (e.g., sketches or poses alone), making it difficult to support real-world fashion scenarios that require joint control over multiple factors such as silhouette, garment layers, color/pattern, and local embellishments. Moreover, adversarial training is prone to instability and visual artifacts [32], [36], [37], and the reliance on paired or carefully aligned supervision limits robustness to occlusion, diverse body shapes, and open-world catalogs. As a result, while GAN-based pipelines can produce plausible textures under constrained conditions, they struggle to achieve reliable, fine-grained, and multi-conditional controllability at scale." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 495, + 165, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 495, + 165, + 506 + ], + "spans": [ + { + "bbox": [ + 45, + 495, + 165, + 506 + ], + "type": "text", + "content": "B. Diffusion-Based Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 509, + 301, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 509, + 301, + 640 + ], + "spans": [ + { + "bbox": [ + 45, + 509, + 301, + 640 + ], + "type": "text", + "content": "Diffusion models [38]–[40] have achieved strong progress in conditional image generation owing to their iterative denoising process and flexible conditioning interfaces. 
To improve controllability with minimal modification to large backbones, plugin-based approaches such as IP-Adapter [21], ControlNet [22], and BLIP-Diffusion [41] inject external conditions (e.g., reference images, structural maps, or language cues) through lightweight adapters. In parallel, reference-guided or dual-stream designs [42]–[45] propagate features from exemplars alongside text/image prompts, thereby strengthening identity preservation and fine control during sampling." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 641, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 641, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 641, + 301, + 750 + ], + "type": "text", + "content": "In fashion-related applications, DiffCloth [46] supports localized garment edits via part-specific textual prompts, enabling independent control over regions such as sleeves and collars. For logo-centric generation, AnyLogo [18] adopts a dual-state denoising strategy to retain subtle logo details; LogoSticker [19] performs token-based injection to flexibly place logo elements; and RefDiffuser [17] leverages expert-driven plugins to enhance texture fidelity and spatial alignment. Despite these advances, most methods emphasize either global" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 347, + 82, + 524, + 284 + ], + "blocks": [ + { + "bbox": [ + 343, + 57, + 529, + 75 + ], + "lines": [ + { + "bbox": [ + 343, + 57, + 529, + 75 + ], + "spans": [ + { + "bbox": [ + 343, + 57, + 529, + 75 + ], + "type": "text", + "content": "TABLEI DEFINITIONS OF MAIN SYMBOLS USED IN THIS PAPER." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 347, + 82, + 524, + 284 + ], + "lines": [ + { + "bbox": [ + 347, + 82, + 524, + 284 + ], + "spans": [ + { + "bbox": [ + 347, + 82, + 524, + 284 + ], + "type": "table", + "html": "
NotationDefinition
tTimestep
ZtLatent feature at t step
ZmOutput of mixed attention
x0Real image
xtNoisy data at t step
GGarment image
LLogo image
MMask image
CgFeature of garment image
ClFeature of logo image
CmFeature of mask image
CsFeature of silhouette image
CcFeature of color image
CtFeature of text prompt
θgGlobal appearance model
θlLocal enhancement model
εGaussian noise
αtCumulative product of noise weights
wGuidance scale
αSilhouette scale
βColor scale
", + "image_path": "380582041820ddac62131e41cfc36154d411b7799852e3e145e7ca28d41e33bc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 304, + 564, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 564, + 365 + ], + "type": "text", + "content": "appearance control or localized editing in isolation. A unified framework that jointly models multiple design conditions, e.g., silhouette and layer topology together with color/pattern and local embellishments, while maintaining structural coherence across the denoising trajectory remains underexplored." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 390, + 378, + 482, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 378, + 482, + 389 + ], + "spans": [ + { + "bbox": [ + 390, + 378, + 482, + 389 + ], + "type": "text", + "content": "III. METHODOLOGY" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 393, + 564, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 564, + 429 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 564, + 429 + ], + "type": "text", + "content": "Symbol Definition. To introduce our IMAGGarment method more clearly, we define the main symbols used throughout the paper in TABLE I." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 429, + 564, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 429, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 308, + 429, + 564, + 538 + ], + "type": "text", + "content": "Task Definition. Given a garment silhouette, color palette, user-defined logo, location and an optional text description, fine-grained garment generation (FGG) aims to synthesize high-fidelity garment images with precise control over both global structure and local visual attributes. 
The key challenges lie in jointly modeling multi-conditional inputs, maintaining semantic and visual consistency across different design factors, and supporting controllable placement of fine-grained elements such as logos and color regions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 554, + 406, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 554, + 406, + 564 + ], + "spans": [ + { + "bbox": [ + 308, + 554, + 406, + 564 + ], + "type": "text", + "content": "A. Overall Framework" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 568, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 564, + 750 + ], + "type": "text", + "content": "To address the above challenges, we propose IMAGGarment, a conditional diffusion framework tailored for fine-grained garment generation. Our framework comprises two components: a global appearance model (stage I) and a local enhancement model (stage II), which explicitly disentangle and jointly control the global appearance and local details under multi-conditional guidance, enabling accurate synthesis of garment silhouette, color, and logo placement. As illustrated in Fig. 2, the global appearance model first generates a latent of coarse garment image conditioned on the textual prompt, garment silhouette, and color palette. Subsequently, the local enhancement model refines this latent representation by integrating user-defined logo and spatial constraint, producing the final high-fidelity garment image with fine-grained controllability. 
Specifically, the global appearance model (Section III-B)" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 54, + 300, + 119 + ], + "blocks": [ + { + "bbox": [ + 47, + 54, + 300, + 119 + ], + "lines": [ + { + "bbox": [ + 47, + 54, + 300, + 119 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 300, + 119 + ], + "type": "image", + "image_path": "cb4c46b64e7895f57b8b540ab83c7a11fc75e7a22e0163f7d8735a205bd28267.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 126, + 301, + 164 + ], + "lines": [ + { + "bbox": [ + 45, + 126, + 301, + 164 + ], + "spans": [ + { + "bbox": [ + 45, + 126, + 301, + 164 + ], + "type": "text", + "content": "Fig. 2. Visualization of the IMAGGarment inference pipeline. The global appearance model generates coarse latent from textual prompts, silhouettes, and colors. The local enhancement model then injects user-defined logos and spatial location constraints to produce the fine-grained garment." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 186, + 301, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 301, + 293 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 301, + 293 + ], + "type": "text", + "content": "leverages our proposed mixed attention module and color adapter to effectively capture global appearance features from textual descriptions, silhouettes, and colors, while mitigating entanglement among these conditions. The local enhancement model (Section III-C) introduces an adaptive appearance-aware module (" + }, + { + "bbox": [ + 45, + 186, + 301, + 293 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 45, + 186, + 301, + 293 + ], + "type": "text", + "content": " Module) that injects logo content and spatial location constraint into the latent space, achieving precise logo placement. Finally, the training and inference strategies used in IMAGGarment are summarized in Section III-D." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 316, + 208, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 208, + 327 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 208, + 327 + ], + "type": "text", + "content": "B. Stage I: Global Appearance Model" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 333, + 300, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 333, + 300, + 403 + ], + "spans": [ + { + "bbox": [ + 45, + 333, + 300, + 403 + ], + "type": "text", + "content": "Motivation. Existing garment generation methods [21]–[23] typically rely on single-condition inputs (e.g., sketch or text), causing entangled features and limited controllability. To resolve this, we propose a global appearance model that explicitly disentangles silhouette, color, and text, enabling precise multi-conditional control." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 405, + 301, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 405, + 301, + 608 + ], + "spans": [ + { + "bbox": [ + 45, + 405, + 301, + 608 + ], + "type": "text", + "content": "Architecture. As shown in the left of the Fig. 3, our global appearance model comprises two shared frozen VAE encoders, one frozen VAE decoder, a trainable silhouette UNet, a frozen text encoder, a trainable color adapter, and a denoising UNet with the proposed mixed attention. Specifically, we first utilize the frozen VAE encoder to project the input reference silhouette into the latent space. Subsequently, we employ a trainable silhouette UNet (structurally identical to the denoising UNet but without cross attention) to extract fine-grained silhouette features, which are then integrated into the frozen denoising UNet via our proposed mixed attention module. Meanwhile, textual features obtained from the frozen CLIP text encoder and color features extracted by the proposed color adapter are further fused into the denoising UNet through cross attention. After multiple denoising iterations, the model generates coarse garment images that precisely align with the reference silhouette and faithfully reflect user-specified color." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "spans": [ + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "text", + "content": "Mixed Attention. To effectively incorporate reference silhouette features into the denoising UNet without compromising the generative capability of the original UNet, we propose a mixed attention module. As shown in Fig. 
3, we extend all self attention layers in the denoising UNet to the proposed mixed attention, which introduces two additional learnable projection layers to align the silhouette features " + }, + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "text", + "content": " with the latent features " + }, + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "inline_equation", + "content": "Z_t" + }, + { + "bbox": [ + 45, + 609, + 301, + 705 + ], + "type": "text", + "content": ". Formally, the mixed attention is defined as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 712, + 300, + 750 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 712, + 300, + 750 + ], + "spans": [ + { + "bbox": [ + 51, + 712, + 300, + 750 + ], + "type": "interline_equation", + "content": "Z _ {m} = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}}\\right) V + \\alpha \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q \\left(K ^ {\\prime}\\right) ^ {T}}{\\sqrt {d}}\\right) V ^ {\\prime}, \\tag {1}", + "image_path": "d60da471dd8ed888ff57dc0cd6451794dd08310fe0916a3123d3bab2c314cb1a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 55, + 563, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 55, + 563, + 77 + ], + "spans": [ + { + "bbox": [ + 308, + 55, + 563, + 77 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 55, + 563, + 77 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 308, + 55, + 563, + 77 + ], + "type": "text", + "content": " is a hyperparameter controlling the strength of silhouette conditioning. 
The projections are computed as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 82, + 563, + 104 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 82, + 563, + 104 + ], + "spans": [ + { + "bbox": [ + 309, + 82, + 563, + 104 + ], + "type": "interline_equation", + "content": "Q = Z _ {t} W _ {q}, K = Z _ {t} W _ {k}, V = Z _ {t} W _ {v}, K ^ {\\prime} = C _ {s} W _ {k} ^ {\\prime}, V ^ {\\prime} = C _ {s} W _ {v} ^ {\\prime} \\tag {2}", + "image_path": "335dbfdc286a54b2bc0d2c1f0b09e7e683b15fb70e2cb7e562b37c3930e49a06.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "spans": [ + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "inline_equation", + "content": "W_{q}, W_{k}, W_{v}" + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "content": " are frozen parameters of linear projection layers, whereas " + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "inline_equation", + "content": "W_{k}^{\\prime}, W_{v}^{\\prime}" + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "content": " are newly introduced learnable parameters of projection layers initialized from " + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "inline_equation", + "content": "W_{k}" + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "inline_equation", + "content": "W_{v}" + }, + { + "bbox": [ + 307, + 105, + 563, + 187 + ], + "type": "text", + "content": ", respectively. 
Our mixed attention facilitates the seamless integration of silhouette features into the denoising UNet, thus ensuring that generated garments maintain precise spatial alignment with the reference silhouette." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "text", + "content": "Color Adapter. Accurate color manipulation is essential for generating garments with fine-grained visual details, significantly enhancing visual quality and realism. However, as the base model's textual prompts cannot reliably produce the intended colors, discrepancies often arise between the generated and expected colors. To address this issue, we propose a dedicated color adapter that explicitly treats color as an independent controllable factor. Specifically, given a reference color image, we extract color features " + }, + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "text", + "content": " using a frozen CLIP image encoder combined with a trainable linear layer. 
Subsequently, these color features are integrated into the denoising UNet via a cross attention mechanism, jointly with textual features " + }, + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "inline_equation", + "content": "C_t" + }, + { + "bbox": [ + 307, + 188, + 564, + 354 + ], + "type": "text", + "content": " obtained from the frozen CLIP text encoder:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 358, + 563, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 358, + 563, + 387 + ], + "spans": [ + { + "bbox": [ + 313, + 358, + 563, + 387 + ], + "type": "interline_equation", + "content": "Z _ {n e w} = \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {t} ^ {T}}{\\sqrt {d}}\\right) V _ {t} + \\beta \\cdot \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {c} ^ {T}}{\\sqrt {d}}\\right) V _ {c}, \\tag {3}", + "image_path": "1ee2571ac0b53d09bb1435f8aa3ce3aa1cd081b0f40908cec27aed899cd2e78e.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "spans": [ + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "Q = Z_{t}W_{q}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "K_{t} = C_{t}W_{k}^{t}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "V_{t} = C_{t}W_{v}^{t}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "K_{c} = 
C_{c}W_{k}^{c}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "V_{c} = C_{c}W_{v}^{c}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "W_{k}^{t}, W_{v}^{t}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": " denote frozen parameters of the original cross attention layers in the denoising UNet, while " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "W_{k}^{c}, W_{v}^{c}" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": " are newly introduced trainable projection layers. The hyperparameter " + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 308, + 391, + 563, + 475 + ], + "type": "text", + "content": " modulates the adapter's influence, ensuring precise alignment between generated colors and user specifications." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 493, + 476, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 493, + 476, + 504 + ], + "spans": [ + { + "bbox": [ + 309, + 493, + 476, + 504 + ], + "type": "text", + "content": "C. Stage II: Local Enhancement Model" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 508, + 564, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 508, + 564, + 616 + ], + "spans": [ + { + "bbox": [ + 307, + 508, + 564, + 616 + ], + "type": "text", + "content": "Motivation. Existing methods [18], [19] typically neglect detailed logo integration or treat it as a separate task, causing poor spatial alignment and visual inconsistency. 
To address this limitation, we propose a local enhancement model equipped with an adaptive appearance-aware " + }, + { + "bbox": [ + 307, + 508, + 564, + 616 + ], + "type": "inline_equation", + "content": "(A^3)" + }, + { + "bbox": [ + 307, + 508, + 564, + 616 + ], + "type": "text", + "content": " module, explicitly injecting user-defined logos and spatial constraints into the latent space. This design enables precise, consistent control over localized garment details, significantly enhancing visual fidelity." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "content": "Architecture. As illustrated on the right of Fig. 3, the local enhancement model comprises a frozen VAE encoder and decoder, a denoising UNet, and an adaptive appearance-aware module (" + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "content": " module). The " + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "content": " module fuses local conditions, such as logos and spatial constraints, by concatenating them along spatial or channel dimensions, enabling precise control over fine-grained visual elements. Given a garment, logo, and placement mask, the model adaptively adjusts the logo's size and position while preserving its visual fidelity. 
To reduce redundancy and focus on local detail refinement, we optimize only the self attention layers of the denoising UNet and discard" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 55, + 312, + 231 + ], + "blocks": [ + { + "bbox": [ + 47, + 55, + 312, + 231 + ], + "lines": [ + { + "bbox": [ + 47, + 55, + 312, + 231 + ], + "spans": [ + { + "bbox": [ + 47, + 55, + 312, + 231 + ], + "type": "image", + "image_path": "c72d485e9c001e310bafde876dc639ecb4c97bb638128363a67326dbe08778ca.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 240, + 565, + 281 + ], + "lines": [ + { + "bbox": [ + 45, + 240, + 565, + 281 + ], + "spans": [ + { + "bbox": [ + 45, + 240, + 565, + 281 + ], + "type": "text", + "content": "Fig. 3. Overview of our IMAGGarment framework. IMAGGarment is a two-stage conditional diffusion framework for fine-grained garment generation. The global appearance model first synthesizes a coarse latent representation from the input text prompt, silhouette, and color palette using a parallel UNet with mixed attention and a color adapter. 
The local enhancement model then refines this latent by injecting user-defined logos and location constraints through the proposed " + }, + { + "bbox": [ + 45, + 240, + 565, + 281 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 45, + 240, + 565, + 281 + ], + "type": "text", + "content": " module, enabling precise logo placement and high-fidelity garment generation." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 313, + 56, + 563, + 230 + ], + "blocks": [ + { + "bbox": [ + 313, + 56, + 563, + 230 + ], + "lines": [ + { + "bbox": [ + 313, + 56, + 563, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 56, + 563, + 230 + ], + "type": "image", + "image_path": "00509be361fe8ae840f0c55a7033c936f301abc0d110fd4824306c662b54050d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 298, + 301, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 298, + 301, + 321 + ], + "spans": [ + { + "bbox": [ + 45, + 298, + 301, + 321 + ], + "type": "text", + "content": "all cross attention layers, as the global appearance model has already encoded the textual information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "spans": [ + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": " Module. To precisely integrate fine-grained logo details into designated garment regions, we introduce the adaptive appearance-aware " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "(A^3)" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": " module. 
By fusing image-based conditions across specific dimensions, our " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": " module enables precise and consistent logo integration. Specifically, given a coarse garment image " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": ", a logo image " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": ", and a binary placement mask " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": ", we first encode them using a frozen VAE encoder to obtain their corresponding latent features: " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "C_g \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "C_l \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{8}}" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": ". 
The mask " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": " is resized via nearest-neighbor interpolation to match the latent resolution, resulting in " + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "inline_equation", + "content": "C_m \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{8}}" + }, + { + "bbox": [ + 45, + 321, + 301, + 466 + ], + "type": "text", + "content": ". We then construct the spatially aligned conditional input as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 468, + 299, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 468, + 299, + 485 + ], + "spans": [ + { + "bbox": [ + 74, + 468, + 299, + 485 + ], + "type": "interline_equation", + "content": "X = \\operatorname {C o n c a t} \\left(C _ {g} \\otimes C _ {m}, C _ {l}\\right), \\quad X \\in \\mathbb {R} ^ {4 \\times \\frac {H}{8} \\times \\frac {W}{4}}, \\tag {4}", + "image_path": "382b919c432f4d91a756ad7b59aeee5ad88c01678449e8ec4d62479a1835bc33.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "content": " denotes element-wise multiplication and Concat indicates spatial concatenation along the width dimension. 
To align with " + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "content": ", the resized mask " + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "inline_equation", + "content": "C_m" + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "content": " is zero-padded to obtain " + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "inline_equation", + "content": "C_M \\in \\mathbb{R}^{1 \\times \\frac{H}{8} \\times \\frac{W}{4}}" + }, + { + "bbox": [ + 45, + 488, + 301, + 548 + ], + "type": "text", + "content": ". Next, we concatenate the garment and logo features to form a clean latent representation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 125, + 553, + 299, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 553, + 299, + 567 + ], + "spans": [ + { + "bbox": [ + 125, + 553, + 299, + 567 + ], + "type": "interline_equation", + "content": "x _ {0} = \\operatorname {C o n c a t} \\left(C _ {g}, C _ {l}\\right), \\tag {5}", + "image_path": "58721a9e3534a0b017199e56a3e4e58e53b4032c76a821b5b8918bf453eafec2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 571, + 269, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 571, + 269, + 583 + ], + "spans": [ + { + "bbox": [ + 45, + 571, + 269, + 583 + ], + "type": "text", + "content": "and inject noise consistent with the diffusion process:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 80, + 586, + 299, + 601 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 586, + 299, + 601 + ], + "spans": [ + { + "bbox": [ + 80, + 586, + 299, + 601 + ], + "type": "interline_equation", + "content": "x _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} \\cdot x _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\cdot \\epsilon , \\quad \\epsilon \\sim 
\\mathcal {N} (0, \\mathbf {I}), \\tag {6}", + "image_path": "5b74d79f3644eb1654f74a81b1adcdf5d1917432debbdacbab4166a6a8dba118.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "spans": [ + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": " denotes the clean latent feature obtained by concatenating garment and logo features, and " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "x_{t} \\in \\mathbb{R}^{4 \\times \\frac{H}{8} \\times \\frac{W}{4}}" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": " is the corresponding noisy latent at diffusion timestep " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "\\bar{\\alpha}_{t}" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": " is the cumulative product of the noise schedule coefficients, and " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": " is the Gaussian noise sampled from " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": ". 
Finally, the full model input is obtained by concatenating the noisy latent " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": ", the padded mask " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "C_M" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": ", and the aligned conditional input " + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 45, + 605, + 301, + 701 + ], + "type": "text", + "content": " along the channel dimension:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 78, + 704, + 299, + 720 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 704, + 299, + 720 + ], + "spans": [ + { + "bbox": [ + 78, + 704, + 299, + 720 + ], + "type": "interline_equation", + "content": "Z = \\operatorname {C o n c a t} \\left(x _ {t}, C _ {M}, X\\right), \\quad Z \\in \\mathbb {R} ^ {9 \\times \\frac {H}{8} \\times \\frac {W}{4}}. 
\\tag {7}", + "image_path": "cdd497e7ffd20245df054abb3d36a3472a9d0bea12ff74109a4efe6fc01bb38f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "content": "This channel-wise concatenation allows the model to jointly reason over appearance, spatial constraints, and guidance" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 298, + 563, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 298, + 563, + 323 + ], + "spans": [ + { + "bbox": [ + 308, + 298, + 563, + 323 + ], + "type": "text", + "content": "signals, while maintaining compatibility with the UNet architecture for spatially aware logo synthesis." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 342, + 421, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 342, + 421, + 354 + ], + "spans": [ + { + "bbox": [ + 309, + 342, + 421, + 354 + ], + "type": "text", + "content": "D. Training and Inference" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 358, + 564, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 358, + 564, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 358, + 564, + 491 + ], + "type": "text", + "content": "Training. The training process is divided into two stages, each targeting a specific set of objectives with separate optimization strategies. We first train the global appearance model independently to generate a semantically coherent garment representation conditioned on silhouette and color. After verifying its performance, we freeze it and train the local enhancement model to inject fine-grained logos guided by spatial masks. 
This sequential training avoids gradient interference between heterogeneous objectives and ensures each module converges toward its task-specific goal. Both stages adopt mean squared error (MSE) loss to supervise the denoising process." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "spans": [ + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": "Stage I. The global appearance model " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "\\theta_{g}" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": " is trained to synthesize garments that align with the target silhouette and color under textual guidance. To preserve the generative capacity of the pretrained denoising UNet, we freeze all parameters except those of the silhouette UNet and the cross-attention projections in the mixed attention module. 
Given silhouette features " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": ", text embeddings " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "C_t" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": ", and color features " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": ", we adopt a decoupled training strategy with " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{silhouette}}" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{color}}" + }, + { + "bbox": [ + 308, + 491, + 564, + 598 + ], + "type": "text", + "content": " losses:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 332, + 603, + 563, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 603, + 563, + 624 + ], + "spans": [ + { + "bbox": [ + 332, + 603, + 563, + 624 + ], + "type": "interline_equation", + "content": "L _ {\\text {s i l h o u e t t e}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {s}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {s}, t) \\| ^ {2}, \\tag {8}", + "image_path": "0816818dd69391867c1691bd9d8e9eaaba2592bff23060e93b57356b9fac22f7.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 347, + 621, + 539, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 621, + 539, + 637 + ], + "spans": [ + { + "bbox": [ + 347, + 621, + 539, + 637 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {c o l o 
r}} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {t}, C _ {c}, t} \\left\\| \\epsilon - \\epsilon_ {\\theta_ {g}} (x _ {t}, C _ {t}, C _ {c}, t) \\right\\| ^ {2},", + "image_path": "4d65aa6ae5367016dbf9357c4a96d0e34eb03e34a38102f66c3219c2e1d833d1.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "spans": [ + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "text", + "content": " is the added noise and " + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta_g}" + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "text", + "content": " is the prediction from the global appearance model at timestep " + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 308, + 643, + 563, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": "Stage II. The local enhancement model " + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "inline_equation", + "content": "\\theta_{l}" + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": " refines the coarse latent by injecting logos at user-defined locations. To reduce overhead, we fine-tune only the self-attention layers of the logo UNet. 
Given logo feature " + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "inline_equation", + "content": "C_l" + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": ", spatial mask " + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "inline_equation", + "content": "C_m" + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": ", and garment latent " + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "inline_equation", + "content": "C_g" + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": ", the training objective " + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{logo}}" + }, + { + "bbox": [ + 308, + 666, + 564, + 727 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 318, + 734, + 563, + 750 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 734, + 563, + 750 + ], + "spans": [ + { + "bbox": [ + 318, + 734, + 563, + 750 + ], + "type": "interline_equation", + "content": "L _ {\\log o} = \\mathbb {E} _ {x _ {0}, \\epsilon , C _ {l}, C _ {m}, C _ {g}, t} \\| \\epsilon - \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\| ^ {2}, \\tag {9}", + "image_path": "9c18b45e307d8cd2513062aa4be434f3ee3c8311dc589d7a63ec57b42d24511f.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 57, + 211, + 235 + ], + "blocks": [ + { + "bbox": [ + 63, + 57, + 211, + 235 + ], + "lines": [ + { + "bbox": [ + 63, + 57, + 211, + 235 + ], + "spans": [ + { + "bbox": [ + 63, + 57, + 211, + 235 + ], + "type": "image", + "image_path": "64a02efe184fb95c9778897310f672e19f2755ec83fe199f87014ef556ee84c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 217, + 57, + 372, + 236 + ], + "blocks": [ + { + "bbox": [ + 217, + 57, + 372, + 236 + ], + "lines": [ + { + "bbox": [ + 217, + 57, + 372, + 236 + ], + "spans": [ + { + "bbox": [ + 217, + 57, + 372, + 236 + ], + "type": "image", + "image_path": "a8de64fc87ebbbb24fdc244dacf7fe6ce798d6fe30d9d7a74517f91cb90468d6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 237, + 358, + 249 + ], + "lines": [ + { + "bbox": [ + 220, + 237, + 358, + 249 + ], + "spans": [ + { + "bbox": [ + 220, + 237, + 358, + 249 + ], + "type": "text", + "content": "(a) Dataset Construction Pipeline" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 380, + 57, + 548, + 236 + ], + "blocks": [ + { + "bbox": [ + 380, + 57, + 548, + 236 + ], + "lines": [ + { + "bbox": [ + 380, + 57, + 548, + 236 + ], + "spans": [ + { + "bbox": [ + 380, + 57, + 548, + 236 + ], + "type": "image", + "image_path": "6db6350dc95f55e02bea38610b5cce4f65ba7b193e8b6bad514799343caf2b2b.jpg" + } + ] + } + ], + "index": 4, + 
"angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 62, + 251, + 211, + 332 + ], + "blocks": [ + { + "bbox": [ + 62, + 251, + 211, + 332 + ], + "lines": [ + { + "bbox": [ + 62, + 251, + 211, + 332 + ], + "spans": [ + { + "bbox": [ + 62, + 251, + 211, + 332 + ], + "type": "image", + "image_path": "66e6e0f583c671bacbde2f78f77f1b0f2a3e1e9a4818ff3254c5f43361254b11.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 236, + 251, + 390, + 332 + ], + "blocks": [ + { + "bbox": [ + 236, + 251, + 390, + 332 + ], + "lines": [ + { + "bbox": [ + 236, + 251, + 390, + 332 + ], + "spans": [ + { + "bbox": [ + 236, + 251, + 390, + 332 + ], + "type": "image", + "image_path": "326abcc7b866a086fbf4b235cb7a6accdd96a7ecdf6f25d4dae72b4360477921.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 413, + 251, + 548, + 332 + ], + "blocks": [ + { + "bbox": [ + 413, + 251, + 548, + 332 + ], + "lines": [ + { + "bbox": [ + 413, + 251, + 548, + 332 + ], + "spans": [ + { + "bbox": [ + 413, + 251, + 548, + 332 + ], + "type": "image", + "image_path": "3e41e76eb094e74b4d129e749105281a6ca3f501c3a424d666aa1e98c60a1ef4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 62, + 334, + 211, + 416 + ], + "blocks": [ + { + "bbox": [ + 62, + 334, + 211, + 416 + ], + "lines": [ + { + "bbox": [ + 62, + 334, + 211, + 416 + ], + "spans": [ + { + "bbox": [ + 62, + 334, + 211, + 416 + ], + "type": "image", + "image_path": "87bcd73959fce6d8e915e15134827004325fc69c2bc2d357e668013804ed5736.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 418, + 384, + 431 + ], + "lines": [ + { + "bbox": [ + 196, + 418, + 384, + 431 + ], + "spans": [ + { + "bbox": [ + 196, + 418, + 384, 
+ 431 + ], + "type": "text", + "content": "(b) Samples from the GarmentBench Dataset" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 238, + 334, + 382, + 416 + ], + "blocks": [ + { + "bbox": [ + 238, + 334, + 382, + 416 + ], + "lines": [ + { + "bbox": [ + 238, + 334, + 382, + 416 + ], + "spans": [ + { + "bbox": [ + 238, + 334, + 382, + 416 + ], + "type": "image", + "image_path": "95e1be1017eb824a9064c72dfcb0e24d6268b917ef5154d0c09a78e835b4975e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 441, + 563, + 460 + ], + "lines": [ + { + "bbox": [ + 45, + 441, + 563, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 441, + 563, + 460 + ], + "type": "text", + "content": "Fig. 4. Overview of GarmentBench dataset construction pipeline and samples. (a) Data construction pipeline for GarmentBench. (b) Example samples with multimodal annotations: silhouette, logo, text, logo location, and color." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 334, + 548, + 416 + ], + "blocks": [ + { + "bbox": [ + 406, + 334, + 548, + 416 + ], + "lines": [ + { + "bbox": [ + 406, + 334, + 548, + 416 + ], + "spans": [ + { + "bbox": [ + 406, + 334, + 548, + 416 + ], + "type": "image", + "image_path": "a5baa2afde9f8babea314ec85fc8461cf4b88aaa1b3a130882ea511a78b72e22.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 481, + 301, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 481, + 301, + 503 + ], + "spans": [ + { + "bbox": [ + 45, + 481, + 301, + 503 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 481, + 301, + 503 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta_l}" + }, + { + "bbox": [ + 45, + 481, + 301, + 503 + ], + "type": "text", + "content": " denotes the prediction from the local enhancement model." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 45, + 508, + 300, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 508, + 300, + 581 + ], + "spans": [ + { + "bbox": [ + 45, + 508, + 300, + 581 + ], + "type": "text", + "content": "Inference. IMAGGarment supports end-to-end inference through a two-stage pipeline operating in a shared latent space. The global appearance model first generates a latent of coarse garment image conditioned on the input text prompt, silhouette, color, and mask. 
This process is guided by classifier-free guidance (CFG) [47]:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 64, + 599, + 299, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 599, + 299, + 628 + ], + "spans": [ + { + "bbox": [ + 64, + 599, + 299, + 628 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, C _ {t}, C _ {s}, C _ {c}, t\\right) \\tag {10} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {g}} \\left(x _ {t}, t\\right) \\\\ \\end{array}", + "image_path": "aa2f2a8ae8e9a8d33b674723dccc45e54091ec86bb8aac4563e39acbdc86a1d9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "spans": [ + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "content": "here, " + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "content": " is the CFG scale and " + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "content": " denotes the noisy latent at timestep " + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "content": ". The coarse latent is then refined by the local enhancement model, which incorporates user-defined logos and spatial constraints through the " + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 45, + 646, + 301, + 705 + ], + "type": "text", + "content": " module. 
We apply conditional CFG:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 723, + 299, + 752 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 723, + 299, + 752 + ], + "spans": [ + { + "bbox": [ + 52, + 723, + 299, + 752 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\check {\\epsilon} _ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) = w \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {l}, C _ {m}, C _ {g}, t\\right) \\tag {11} \\\\ + (1 - w) \\cdot \\epsilon_ {\\theta_ {l}} \\left(x _ {t}, C _ {m}, C _ {g}, t\\right) \\\\ \\end{array}", + "image_path": "7cad529b8425d2abd1a82594116dc16ce40f1b2c1f8e922aa5d74b85b80f5814.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 395, + 481, + 478, + 492 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 481, + 478, + 492 + ], + "spans": [ + { + "bbox": [ + 395, + 481, + 478, + 492 + ], + "type": "text", + "content": "IV. EXPERIMENTS" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 502, + 410, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 502, + 410, + 512 + ], + "spans": [ + { + "bbox": [ + 308, + 502, + 410, + 512 + ], + "type": "text", + "content": "A. Dataset and Metrics" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 519, + 563, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 519, + 563, + 602 + ], + "spans": [ + { + "bbox": [ + 307, + 519, + 563, + 602 + ], + "type": "text", + "content": "Dataset Construction. As shown in Fig. 4 (a), we construct and release GarmentBench, a large-scale dataset for fine-grained garment generation, containing multi-modal design conditions such as text, sketches, colors, logos, and location masks. It serves as a controllable and extensible benchmark for advancing personalized fashion generation. 
The construction process is as follows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 604, + 564, + 748 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 308, + 604, + 564, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 604, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 308, + 604, + 564, + 688 + ], + "type": "text", + "content": "(1) Image Collection and Preprocessing. We collect over 189K high-quality garment images from the internet, covering a wide range of categories such as tops, bottoms, and dresses. To eliminate background distractions and focus on the garment region, we apply YOLOv8 [48] for clothing detection and perform tight cropping to obtain clean garment-centric images for further processing." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 689, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 689, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 689, + 564, + 748 + ], + "type": "text", + "content": "(2) Text, Sketch, and Color Extraction. For each image, we automatically generate three auxiliary conditions to simulate real-world design guidance: textual descriptions generated by the multi-modal LLM Qwen-VL-Chat [49], covering key attributes such as color, silhouette, and style; structural sketches" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 102 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 102 + ], + "type": "text", + "content": "obtained using Informative-Drawings [50], providing shape and layout priors; and color palettes extracted from single-color garments identified via ResNet50 [51] and clustered using K-means [52]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 102, + 301, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 102, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 45, + 102, + 301, + 209 + ], + "type": "text", + "content": "(3) Logo Extraction and Location Annotation. To support logo insertion and spatial control, we further extract local design elements such as logos and prints. We use YOLOv8 to detect visually distinct regions (e.g., anime characters, animal patterns), followed by manual verification to ensure label quality. We also annotate spatial locations and generate binary masks to serve as precise spatial constraints. In total, GarmentBench contains 189,966 garment-condition pairs with rich fine-grained annotations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 210, + 301, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 210, + 301, + 317 + ], + "spans": [ + { + "bbox": [ + 45, + 210, + 301, + 317 + ], + "type": "text", + "content": "Dataset Description. As shown in Fig. 
4 (b), we present representative samples from the GarmentBench dataset, which include fine-grained garment images paired with multi-modal conditions such as textual descriptions, structural silhouettes, color references, logos, and spatial location masks. Additionally, we randomly sample images from the Fashion-ControlNet-Dataset-V31 and apply the same preprocessing pipeline as GarmentBench to construct a test set with 1,267 image-condition pairs for evaluation and comparative analysis." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 318, + 301, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 318, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 45, + 318, + 301, + 413 + ], + "type": "text", + "content": "Dataset Statement. GarmentBench is curated from publicly available fashion imagery under a non-commercial research intent. All personal identifiers were removed; third-party logos and brand marks are included solely to evaluate controllability and remain the property of their respective owners. We release only derived annotations and source URLs (not raw images), together with license notices and a takedown procedure; exact split indices and random seeds are provided for reproducibility." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 414, + 301, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 414, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 45, + 414, + 301, + 582 + ], + "type": "text", + "content": "Evaluation Metrics. We adopt four metrics to comprehensively evaluate visual quality, conditional consistency, and fine-grained controllability. Fréchet inception distance (FID) [53] measures the distribution similarity between generated and real images, reflecting overall realism. Color structure similarity (CSS) [54] assesses the consistency of color distribution, measuring color controllability. 
Lastly, Logo location accuracy (LLA) [55] quantifies the spatial deviation between generated and target logo positions, reflecting spatial precision. Learned perceptual image patch similarity (LPIPS) [56] reflects human-perceived visual similarity, effectively capturing structural and textural consistency. These metrics comprehensively assess quality and controllability in fine-grained garment generation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 599, + 159, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 599, + 159, + 609 + ], + "spans": [ + { + "bbox": [ + 45, + 599, + 159, + 609 + ], + "type": "text", + "content": "B. Implementation Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 613, + 301, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 613, + 301, + 698 + ], + "spans": [ + { + "bbox": [ + 45, + 613, + 301, + 698 + ], + "type": "text", + "content": "In our experiments, both the silhouette UNet and the denoising UNet are initialized with the pretrained Stable Diffusion v1.5 model2. The local enhancement model is based on the inpainting variant of Stable Diffusion v1.53, with only the self-attention layers being fine-tuned to reduce computational cost. We adopt OpenCLIP ViT-H/144 as the CLIP image encoder. All input images are resized to " + }, + { + "bbox": [ + 45, + 613, + 301, + 698 + ], + "type": "inline_equation", + "content": "512 \\times 640" + }, + { + "bbox": [ + 45, + 613, + 301, + 698 + ], + "type": "text", + "content": " resolution. 
We" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 708, + 292, + 748 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 708, + 292, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 708, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 708, + 292, + 718 + ], + "type": "text", + "content": "1https://huggingface.co/datasets/Abrumu/Fashion_controlnet_dataset_V3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 54, + 718, + 274, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 718, + 274, + 727 + ], + "spans": [ + { + "bbox": [ + 54, + 718, + 274, + 727 + ], + "type": "text", + "content": "2https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 54, + 727, + 291, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 727, + 291, + 738 + ], + "spans": [ + { + "bbox": [ + 54, + 727, + 291, + 738 + ], + "type": "text", + "content": "3https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 54, + 738, + 201, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 738, + 201, + 748 + ], + "spans": [ + { + "bbox": [ + 54, + 738, + 201, + 748 + ], + "type": "text", + "content": "4 https://github.com/mlfoundations/open Clip" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "type": "table", + "bbox": [ + 312, + 91, + 567, + 163 + ], + "blocks": [ + { + "bbox": [ + 316, + 57, + 556, + 84 + ], + "lines": [ + { + "bbox": [ + 316, + 57, + 556, + 84 + ], + "spans": [ + { + "bbox": [ + 316, + 57, + 556, + 84 + ], + "type": "text", + "content": "TABLE II QUANTITATIVE COMPARISONS ON GARMENTBENCH. OURS ACHIEVES THE TOP RESULTS ACROSS ALL METRICS, WITH BEST IN BOLD." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 91, + 567, + 163 + ], + "lines": [ + { + "bbox": [ + 312, + 91, + 567, + 163 + ], + "spans": [ + { + "bbox": [ + 312, + 91, + 567, + 163 + ], + "type": "table", + "html": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
BLIP-Diffusion* [41]101.99104.440.130.68
ControlNet-Garment* [22]41.2283.300.360.41
AnyDoor* [59]38.0868.240.650.17
IP-Adapter-Garment* [21]37.9592.950.360.43
IMAGGarment (Ours)17.6336.160.720.10
", + "image_path": "8df82528834fc4a351d35f88f4433fb6cd1c95ff062a08c153f3d1f479e21123.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 315, + 163, + 497, + 171 + ], + "lines": [ + { + "bbox": [ + 315, + 163, + 497, + 171 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 497, + 171 + ], + "type": "text", + "content": "* denotes re-implemented by us for a fair comparison." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "spans": [ + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "content": "use the AdamW optimizer [57] with a constant learning rate of " + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "content": ". The global appearance model and the local enhancement model are trained for 150K and 50K steps, respectively, using a batch size of 20. During inference, we adopt the DDIM sampler [58] with 50 sampling steps. Unless otherwise specified, the silhouette weight " + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "content": " and color weight " + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "content": " in Eq.1 and Eq.3 are set to 0.6 and 1.0. 
The classifier-free guidance (CFG) scale " + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 307, + 192, + 564, + 299 + ], + "type": "text", + "content": " in Eq.10 and Eq.11 is set to a default value of 7.0." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 315, + 418, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 315, + 418, + 327 + ], + "spans": [ + { + "bbox": [ + 309, + 315, + 418, + 327 + ], + "type": "text", + "content": "C. Baseline Comparisons" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 329, + 564, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 329, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 307, + 329, + 564, + 665 + ], + "type": "text", + "content": "Due to the absence of prior work tailored to fine-grained garment generation with multi-condition control, we compare our method against four representative baselines: BLIP-Diffusion [41], AnyDoor [59], ControlNet [22], and IP-Adapter [21]. For subject-driven generation methods, BLIP-Diffusion [41] leverages a learnable Q-Former to align textual and visual embeddings in the latent space, initially designed for subject-preserving generation from text-image pairs. AnyDoor [59] combines identity and detail encoders to reconstruct personalized content, which we adapt to conditions of garment appearance and logo inputs. For plugin-based baselines, we extend ControlNet [22] and IP-Adapter [21] by duplicating and modifying their conditional branches to support multi-conditional inputs, such as silhouette, color, and logo. The adapted versions are referred to as ControlNet-Garment and IP-Adapter-Garment. Specifically, for ControlNet-Garment, we input silhouette, color, logo and mask maps into the ControlNet branch and inject them at each downsampling block, following standard practice. 
For IP-Adapter-Garment, we extend the official implementation to accept silhouette, color, logo and mask embeddings, which are concatenated and injected via cross-attention. To ensure task relevance, all methods are fine-tuned on our GarmentBench dataset with support for logo-specific conditioning. All methods are trained and evaluated under identical training protocols, input resolutions, and hardware setups. The corresponding quantitative and qualitative results are presented in Table II and Fig. 5, respectively, with detailed analysis provided below." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "type": "text", + "content": "Quantitative Results. As shown in Table II, IMAGGarment achieves the best performance across all four metrics on the GarmentBench dataset, demonstrating its superiority in controllable fine-grained garment generation. Compared to subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]), which rely on global features for personalized reconstruction, IMAGGarment shows substantial improvements in FID," + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 46, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 53, + 301, + 252 + ], + "blocks": [ + { + "bbox": [ + 50, + 53, + 301, + 252 + ], + "lines": [ + { + "bbox": [ + 50, + 53, + 301, + 252 + ], + "spans": [ + { + "bbox": [ + 50, + 53, + 301, + 252 + ], + "type": "image", + "image_path": "d3dc9901c6ffb37b8aa90fce37338bfe51ad35aff524d666f4aa800a8bbe3e52.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 259, + 563, + 281 + ], + "lines": [ + { + "bbox": [ + 45, + 259, + 563, + 281 + ], + "spans": [ + { + "bbox": [ + 45, + 259, + 563, + 281 + ], + "type": "text", + "content": "Fig. 5. Qualitative results on seen and unseen GarmentBench samples. The seen set uses original test pairs, while the unseen set involves randomly mixed conditions. IMAGGarment delivers the most consistent outputs, achieving accurate silhouette, color, and logo control across both settings." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 53, + 565, + 253 + ], + "blocks": [ + { + "bbox": [ + 306, + 53, + 565, + 253 + ], + "lines": [ + { + "bbox": [ + 306, + 53, + 565, + 253 + ], + "spans": [ + { + "bbox": [ + 306, + 53, + 565, + 253 + ], + "type": "image", + "image_path": "88692487b4d265fac91ccd274914ea963a8e85d3d96e1a9f6c38b9f571456b6a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 48, + 316, + 304, + 399 + ], + "blocks": [ + { + "bbox": [ + 73, + 293, + 272, + 312 + ], + "lines": [ + { + "bbox": [ + 73, + 293, + 272, + 312 + ], + "spans": [ + { + "bbox": [ + 73, + 293, + 272, + 312 + ], + "type": "text", + "content": "TABLE III QUANTITATIVE ABLATION RESULTS ON GARMENTBENCH." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 316, + 304, + 399 + ], + "lines": [ + { + "bbox": [ + 48, + 316, + 304, + 399 + ], + "spans": [ + { + "bbox": [ + 48, + 316, + 304, + 399 + ], + "type": "table", + "html": "
MethodFID ↓CSS ↓LLA ↑LPIPS ↓
B0139.33104.540.150.64
B147.4236.650.300.15
B230.1997.050.560.33
B321.2043.000.650.11
B446.16108.250.520.38
Full17.6336.160.720.10
", + "image_path": "61e30aa29addae63474715749ba0ffbf5c696de92b586801186800fc3726cf11.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 423, + 301, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 423, + 301, + 614 + ], + "spans": [ + { + "bbox": [ + 45, + 423, + 301, + 614 + ], + "type": "text", + "content": "LPIPS, and CSS. These gains highlight the effectiveness of our mixed attention and color adapter modules in achieving coherent multi-condition fusion, resulting in more realistic, perceptually consistent, and color-faithful outputs. In contrast to plugin-based approaches (ControlNet-Garment [22], IP-Adapter-Garment [21]) that simply stack independent conditional branches, IMAGGarment yields significantly higher LLA, reflecting more precise logo placement. Our proposed " + }, + { + "bbox": [ + 45, + 423, + 301, + 614 + ], + "type": "inline_equation", + "content": "\\mathrm{A}^3" + }, + { + "bbox": [ + 45, + 423, + 301, + 614 + ], + "type": "text", + "content": " module drives these improvements, which adaptively injects spatial priors and logo features into the latent space for accurate local control. Overall, these results indicate that global-only conditioning or naive plugin stacking is insufficient for fine-grained control. By contrast, IMAGGarment provides an effective solution for multi-conditional garment synthesis, enabling precise coordination of global structure and local detail." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "type": "text", + "content": "Qualitative Results. Fig. 5 presents qualitative comparisons on both seen and unseen garments. Notably, the seen test set refers to the designated test split of our GarmentBench dataset. 
In the absence of other suitable public datasets, we assess generalization using an unseen-composition test split constructed by randomly recombining input conditions (e.g., silhouette, color, logo) into combinations that never appear during training, thereby simulating real-world fashion-design scenarios. On seen garments, subject-driven methods (BLIP-Diffusion [41], AnyDoor [59]) reconstruct global appearance but lack spatial control. BLIP-Diffusion retains" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 291, + 564, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 291, + 564, + 531 + ], + "spans": [ + { + "bbox": [ + 307, + 291, + 564, + 531 + ], + "type": "text", + "content": "logo identity yet fails at precise placement due to text-only conditioning, while AnyDoor introduces logo distortions and stylistic artifacts. Plugin-based baselines (ControlNet-Garment [22], IP-Adapter-Garment [21]) treat conditions independently, resulting in poor coordination. ControlNet-Garment suffers from cross-condition interference, and IP-Adapter-Garment often misplaces logos despite preserving structure. In contrast, IMAGGarment achieves accurate control over silhouette, color, and logo placement. On unseen garments, all baselines degrade notably. Subject-driven methods fail to generalize to novel layouts, AnyDoor distorts appearance, and BLIP-Diffusion struggles with logo positioning. Plugin-based methods also falter: ControlNet-Garment produces mismatched outputs, and IP-Adapter-Garment cannot interpret unseen spatial semantics. IMAGGarment remains robust, maintaining alignment across all conditions. This generalization stems from our " + }, + { + "bbox": [ + 307, + 291, + 564, + 531 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 307, + 291, + 564, + 531 + ], + "type": "text", + "content": " module, which effectively integrates spatial and visual cues in the latent space. 
These results validate the controllability and flexibility of our method in both seen and unseen settings." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 552, + 388, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 552, + 388, + 563 + ], + "spans": [ + { + "bbox": [ + 309, + 552, + 388, + 563 + ], + "type": "text", + "content": "D. Ablation Study" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 568, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 564, + 749 + ], + "type": "text", + "content": "To validate the effectiveness of each component in our framework, we design a series of ablation variants within the IMAGGarment architecture: B0 uses the vanilla Stable Diffusion v1.5 without any of our proposed modules, serving as the baseline. B1 removes the local enhancement model (Stage II), evaluating the impact of omitting logo injection and spatial control. B2 removes the global appearance model (Stage I), assessing the model's performance without structured silhouette and color conditioning. B3 removes the color adapter from the global appearance model, isolating the role of color guidance in generation. B4 replaces our mixed attention with vanilla self-attention in the denoising UNet, testing the importance of spatial fusion with silhouette features. Full represents the complete IMAGGarment framework with all proposed modules integrated." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 51, + 552, + 403 + ], + "blocks": [ + { + "bbox": [ + 55, + 51, + 552, + 403 + ], + "lines": [ + { + "bbox": [ + 55, + 51, + 552, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 51, + 552, + 403 + ], + "type": "image", + "image_path": "ecb858eaa61509df14d51c16c06814ec844adac3bb7acb8a7e175d967928b21e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 404, + 563, + 415 + ], + "lines": [ + { + "bbox": [ + 45, + 404, + 563, + 415 + ], + "spans": [ + { + "bbox": [ + 45, + 404, + 563, + 415 + ], + "type": "text", + "content": "Fig. 6. Qualitative ablation results on GarmentBench. The \"Full\" configuration achieves the best results, highlighting the importance of each component." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 426, + 302, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 426, + 302, + 738 + ], + "spans": [ + { + "bbox": [ + 45, + 426, + 302, + 738 + ], + "type": "text", + "content": "Ablation of Architecture Design. Table III presents the quantitative impact of each component in our proposed IMAGGarment. In B1, which removes the local enhancement stage, the model struggles to place logos precisely, leading to degraded LLA. Although the overall garment structure is preserved, the lack of spatial control prevents accurate logo integration. 
In B2, without the global appearance stage, the model fails to maintain silhouette and color consistency, resulting in significantly worse FID, LPIPS, and CSS. This demonstrates that local injection alone is insufficient to handle global garment layouts. B3 disables the color adapter, causing notable drops in CSS, highlighting its role in faithful color transfer and control. B4 replaces our mixed attention with standard self-attention, which weakens the fusion of silhouette guidance and causes drops in both LPIPS and FID, indicating reduced realism and structural coherence. The full IMAGGarment achieves the best performance across all metrics, validating the complementary design of each module's effectiveness in handling multi-condition garment generation. Further, Fig. 6 shows qualitative comparisons. B1 fails to align logos spatially, while B2 produces distorted garments lacking color and silhouette guidance. Despite maintaining logo placement, B3 leads to color mismatch, and B4 generates less coherent garment layouts. In contrast, the full model successfully synthesizes garments with accurate silhouettes, precise logo placement, and faithful color reproduction, demonstrating the benefits" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 308, + 426, + 564, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 426, + 564, + 464 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 564, + 464 + ], + "type": "text", + "content": "of our dual-stage design, color adapter, and mixed attention fusion. Overall, The \"Full\" configuration achieves the best results, highlighting the importance of each component." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 309, + 481, + 436, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 481, + 436, + 493 + ], + "spans": [ + { + "bbox": [ + 309, + 481, + 436, + 493 + ], + "type": "text", + "content": "E. 
More Results and Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "text", + "content": "Controllability Analysis. We assess controllability by varying a single condition at a time (silhouette, color palette, or logo position) while keeping the others fixed. As shown in Fig. 7, each three column block visualizes the model's response to one condition. Changing the silhouette (left block) yields garments that match the target shapes, indicating that the mixed attention module preserves structural alignment. Varying the color palette (middle block) produces the intended color distributions, validating the color adapter for color faithful generation. Adjusting the logo position (right block) achieves precise spatial relocation, showing that the " + }, + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "text", + "content": " module effectively injects spatial priors for local control. Overall, IMAGGarment provides fine-grained and decoupled control of garment attributes suitable for practical design workflows. Non-varied attributes remain stable across manipulations, reflecting minimal cross-condition interference and consistent editing behavior. Sequential composition of edits across attributes produces similar outcomes regardless of edit order, which suggests low inter-attribute coupling. Control fidelity also holds under moderate changes of viewpoint and background, supporting robustness in real design scenarios." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 53, + 211, + 352 + ], + "blocks": [ + { + "bbox": [ + 60, + 53, + 211, + 352 + ], + "lines": [ + { + "bbox": [ + 60, + 53, + 211, + 352 + ], + "spans": [ + { + "bbox": [ + 60, + 53, + 211, + 352 + ], + "type": "image", + "image_path": "4bb9d8f0e0b3f923a99112f702d822e720cb125c76e10189c0e33f9fe0f8cf42.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 226, + 53, + 380, + 355 + ], + "blocks": [ + { + "bbox": [ + 226, + 53, + 380, + 355 + ], + "lines": [ + { + "bbox": [ + 226, + 53, + 380, + 355 + ], + "spans": [ + { + "bbox": [ + 226, + 53, + 380, + 355 + ], + "type": "image", + "image_path": "788e48ddfe0a081261d24ffc7f63ecfa0c2323f0e1a6000ec21737630ee71b22.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 354, + 563, + 373 + ], + "lines": [ + { + "bbox": [ + 45, + 354, + 563, + 373 + ], + "spans": [ + { + "bbox": [ + 45, + 354, + 563, + 373 + ], + "type": "text", + "content": "Fig. 7. Controllability visualization. Each block varies one input condition while keeping others fixed. Left: Silhouette changes lead to consistent structural adaptation. 
Middle: Color palette variation results in accurate color transfer. Right: Logo mask adjustment yields precise spatial placement." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 395, + 53, + 548, + 355 + ], + "blocks": [ + { + "bbox": [ + 395, + 53, + 548, + 355 + ], + "lines": [ + { + "bbox": [ + 395, + 53, + 548, + 355 + ], + "spans": [ + { + "bbox": [ + 395, + 53, + 548, + 355 + ], + "type": "image", + "image_path": "39d07a318b3669a4b33b0957f8d8651bb2279bd1192b7881c0f6b6c4a389919c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 59, + 376, + 549, + 514 + ], + "blocks": [ + { + "bbox": [ + 59, + 376, + 549, + 514 + ], + "lines": [ + { + "bbox": [ + 59, + 376, + 549, + 514 + ], + "spans": [ + { + "bbox": [ + 59, + 376, + 549, + 514 + ], + "type": "image", + "image_path": "f8afae54c6c3fe55f79462f8c82eac82388206a87f2840c2e634af16eb091fed.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "lines": [ + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "spans": [ + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "type": "text", + "content": "Fig. 8. Hyperparameter analysis of silhouette weight " + }, + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "type": "text", + "content": " and color weight " + }, + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 515, + 315, + 526 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "spans": [ + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": "Hyperparameter Analysis. We study the effect of two key hyperparameters in Eq.1 and Eq.3: the silhouette guidance weight " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " and the color conditioning weight " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": ". From Fig. 8, varying " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " directly impacts the model's ability to follow the reference silhouette. When " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " is too low, the generated structure becomes blurry or deviates from the target shape; when too high, it may suppress color and text guidance. We empirically set " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha = 0.6" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " for balanced structural alignment. 
Similarly, the color weight " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " controls the influence of the color palette. As " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " increases, color consistency improves steadily, with " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta = 1.0" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " yielding the best visual fidelity. Joint sweeps over " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "(\\alpha, \\beta)" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " indicate a broad stability region around " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha \\in [0.5, 0.7]" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta \\in [0.8, 1.1]" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": ", showing robustness to moderate mistuning. 
Interaction effects are mild: very large " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " slightly narrows the effective range of " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": ", while very large " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " can oversaturate colors and reduce shading nuance. We therefore adopt " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\alpha = 0.6" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "inline_equation", + "content": "\\beta = 1.0" + }, + { + "bbox": [ + 45, + 537, + 301, + 741 + ], + "type": "text", + "content": " throughout all experiments." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 400, + 538, + 473, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 538, + 473, + 548 + ], + "spans": [ + { + "bbox": [ + 400, + 538, + 473, + 548 + ], + "type": "text", + "content": "V. CONCLUSION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 568, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 564, + 748 + ], + "type": "text", + "content": "We propose IMAGGarment, a unified conditional diffusion framework for fine-grained garment generation with precise control over silhouette, color, and logo placement. 
By introducing mixed attention, color adapter, and the " + }, + { + "bbox": [ + 308, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "A^3" + }, + { + "bbox": [ + 308, + 568, + 564, + 748 + ], + "type": "text", + "content": " module, our framework explicitly disentangles global structure (silhouette and color) from local attributes (logo content and spatial placement), enabling accurate spatial control and high-quality synthesis. To support this task, we construct GarmentBench, a large-scale benchmark with over 180K samples annotated with multi-level design conditions. Comprehensive experiments on both seen and unseen garments demonstrate that IMAGGarment achieves state-of-the-art results in structure fidelity, color consistency, and logo controllability. Code, models, and datasets are publicly available at https://github.com/muzishen/IMAGGarment." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 144, + 55, + 203, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 55, + 203, + 65 + ], + "spans": [ + { + "bbox": [ + 144, + 55, + 203, + 65 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 76, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 52, + 76, + 299, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 76, + 299, + 111 + ], + "spans": [ + { + "bbox": [ + 52, + 76, + 299, + 111 + ], + "type": "text", + "content": "[1] Aijia Zhang, Weiqiang Jia, Qiang Zou, Yixiong Feng, Xiaoxiang Wei, and Ye Zhang. Diffusion-cad: Controllable diffusion model for generating computer-aided design models. IEEE Transactions on Visualization and Computer Graphics, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 113, + 301, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 113, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 52, + 113, + 301, + 156 + ], + "type": "text", + "content": "[2] Xiongzheng Li, Jing Huang, Jinsong Zhang, Xiaokun Sun, Haibiao Xuan, Yu-Kun Lai, Yingdi Xie, Jingyu Yang, and Kun Li. Learning to infer inner-body under clothing from monocular video. IEEE Transactions on Visualization and Computer Graphics, 29(12):5083-5096, 2022." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 158, + 301, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 158, + 301, + 194 + ], + "spans": [ + { + "bbox": [ + 51, + 158, + 301, + 194 + ], + "type": "text", + "content": "[3] Nannan Zhang, Zhenyu Xie, Zhengwentai Sun, Hairui Zhu, Zirong Jin, Nan Xiang, Xiaoguang Han, and Song Wu. Viton-gun: Person-to-person virtual try-on via garment unwrapping. IEEE Transactions on Visualization and Computer Graphics, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 194, + 301, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 194, + 301, + 229 + ], + "spans": [ + { + "bbox": [ + 51, + 194, + 301, + 229 + ], + "type": "text", + "content": "[4] Wen-Yang Zhou, Lu Yuan, Shu-Yu Chen, Lin Gao, and Shi-Min Hu. Lcnerf: Local controllable face generation in neural radiance field. IEEE Transactions on Visualization and Computer Graphics, 30(8):5437-5448, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 230, + 301, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 230, + 301, + 266 + ], + "spans": [ + { + "bbox": [ + 51, + 230, + 301, + 266 + ], + "type": "text", + "content": "[5] Pinaki Nath Chowdhury, Tuanfeng Wang, Duygu Ceylan, Yi-Zhe Song, and Yulia Gryaditskaya. Garment ideation: Iterative view-aware sketch-based garment modeling. In 2022 International Conference on 3D Vision (3DV), pages 22-31. IEEE, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 267, + 301, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 267, + 301, + 302 + ], + "spans": [ + { + "bbox": [ + 51, + 267, + 301, + 302 + ], + "type": "text", + "content": "[6] Yu Jin and Kyungho Lee. Human-ai co-creation in fashion design ideation and sketching: an empirical study. 
In Proceedings of IEEE/CVF Computer Vision and Pattern Recognition Conference (CVPR), CVFAD Workshop, Seattle, USA, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 303, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 303, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 51, + 303, + 301, + 330 + ], + "type": "text", + "content": "[7] Funda Durupynar and Ugur Gudukbay. A virtual garment design and simulation system. In 2007 11th International Conference Information Visualization (IV'07), pages 862-870. IEEE, 2007." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 331, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 331, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 51, + 331, + 301, + 365 + ], + "type": "text", + "content": "[8] Saikrupa PA et al. Smart stitch: A mobile app for personalized garment customization and stitching guidance. In 2025 International Conference on Data Science, Agents & Artificial Intelligence (ICDSAAI), pages 1-5. IEEE, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 366, + 301, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 366, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 51, + 366, + 301, + 402 + ], + "type": "text", + "content": "[9] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. 2023. arXiv:2307.01952." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 403, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 403, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 403, + 301, + 430 + ], + "type": "text", + "content": "[10] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. 2022. ArXiv:2210.02747." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 430, + 301, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 301, + 466 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 301, + 466 + ], + "type": "text", + "content": "[11] Wanchao Su, Hui Ye, Shu-Yu Chen, Lin Gao, and Hongbo Fu. Drawingstyles: Portrait image generation and editing with spatially conditioned stylegan. IEEE transactions on visualization and computer graphics, 29(10):4074-4088, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 467, + 301, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 301, + 502 + ], + "type": "text", + "content": "[12] Changjian Chen, Fei Lv, Yalong Guan, Pengcheng Wang, Shengjie Yu, Yifan Zhang, and Zhuo Tang. Human-guided image generation for expanding small-scale training image datasets. IEEE Transactions on Visualization and Computer Graphics, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "type": "text", + "content": "[13] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. In Proceedings of the ACM SIGGRAPH Conference, pages 1–11, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 530, + 301, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 530, + 301, + 566 + ], + "spans": [ + { + "bbox": [ + 47, + 530, + 301, + 566 + ], + "type": "text", + "content": "[14] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1125-1134, 2017." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 567, + 301, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 301, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 301, + 611 + ], + "type": "text", + "content": "[15] Subhadeep Koley, Ayan Kumar Bhunia, Deeptanshu Sekhri, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. It's all about your sketch: Democratising sketch control in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7204-7214, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 612, + 301, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 612, + 301, + 656 + ], + "spans": [ + { + "bbox": [ + 47, + 612, + 301, + 656 + ], + "type": "text", + "content": "[16] Subhadeep Koley, Ayan Kumar Bhunia, Aneeshan Sain, Pinaki Nath Chowdhury, Tao Xiang, and Yi-Zhe Song. Text-to-image diffusion models are great sketch-photo matchmakers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16826-16837, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "type": "text", + "content": "[17] Taewook Kim, Ze Wang, Zhengyuan Yang, Jiang Wang, Lijuan Wang, Zicheng Liu, and Qiang Qiu. Conditional text-to-image generation with reference guidance. 2024. ArXiv:2411.16713." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 685, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 685, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 685, + 301, + 711 + ], + "type": "text", + "content": "[18] Jinghao Zhang, Wen Qian, Hao Luo, Fan Wang, and Feng Zhao. 
Anylogo: Symbiotic subject-driven diffusion system with gemini status. 2024. ArXiv:2409.17740." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 712, + 301, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 748 + ], + "type": "text", + "content": "[19] Mingkang Zhu, Xi Chen, Zhongdao Wang, Hengshuang Zhao, and Jiaya Jia. Logosticker: Inserting logos into diffusion models for customized generation. In Proceedings of European Conference on Computer Vision, pages 363-378, 2024." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 563, + 748 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 310, + 56, + 563, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 563, + 102 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 563, + 102 + ], + "type": "text", + "content": "[20] Mingzhe Yu, Yunshan Ma, Lei Wu, Changshuo Wang, Xue Li, and Lei Meng. Fashiondpo: Fine-tune fashion outfit generation model using direct preference optimization. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 212-222, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 102, + 563, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 102, + 563, + 128 + ], + "spans": [ + { + "bbox": [ + 310, + 102, + 563, + 128 + ], + "type": "text", + "content": "[21] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. 2023. ArXiv:2308.06721." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "spans": [ + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "type": "text", + "content": "[22] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 164, + 563, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 164, + 563, + 201 + ], + "spans": [ + { + "bbox": [ + 310, + 164, + 563, + 201 + ], + "type": "text", + "content": "[23] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 201, + 563, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 201, + 563, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 201, + 563, + 227 + ], + "type": "text", + "content": "[24] Shu-Yu Chen, Wanchao Su, Lin Gao, Shihong Xia, and Hongbo Fu. Deepfacedrawing: deep generation of face images from sketches. ACM Transactions on Graphics, 39(4), August 2020." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 228, + 563, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 228, + 563, + 262 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 563, + 262 + ], + "type": "text", + "content": "[25] Shu-Yu Chen, Feng-Lin Liu, Yu-Kun Lai, Paul L. Rosin, Chunpeng Li, Hongbo Fu, and Lin Gao. Deepfaceediting: deep face generation and editing with disentangled geometry and appearance control. ACM Transactions on Graphics, 40(4), July 2021." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 263, + 563, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 263, + 563, + 289 + ], + "spans": [ + { + "bbox": [ + 310, + 263, + 563, + 289 + ], + "type": "text", + "content": "[26] Xian Wu, Chen Wang, Hongbo Fu, Ariel Shamir, Song-Hai Zhang, and Shi-Min Hu. Deepportraitdrawing: Generating human body images from freehand sketches, 2022. ArXiv:2205.02070." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 290, + 563, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 290, + 563, + 326 + ], + "spans": [ + { + "bbox": [ + 310, + 290, + 563, + 326 + ], + "type": "text", + "content": "[27] Arnab Ghosh, Richard Zhang, Puneet K Dokania, Oliver Wang, Alexei A Efros, Philip HS Torr, and Eli Shechtman. Interactive sketch & fill: Multiclass sketch-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1171-1180, 2019." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 326, + 563, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 326, + 563, + 361 + ], + "spans": [ + { + "bbox": [ + 310, + 326, + 563, + 361 + ], + "type": "text", + "content": "[28] Wengling Chen and James Hays. Sketchygan: Towards diverse and realistic sketch to image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 9416-9425, 2018." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 362, + 563, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 362, + 563, + 388 + ], + "spans": [ + { + "bbox": [ + 310, + 362, + 563, + 388 + ], + "type": "text", + "content": "[29] Zeyu Li, Cheng Deng, Erkun Yang, and Dacheng Tao. Staged sketch-to-image synthesis via semi-supervised generative adversarial networks. IEEE Transactions on Multimedia, 23:2694-2705, 2020." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 389, + 563, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 389, + 563, + 415 + ], + "spans": [ + { + "bbox": [ + 310, + 389, + 563, + 415 + ], + "type": "text", + "content": "[30] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE Signal Processing Magazine, 35(1):53-65, 2018." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 416, + 563, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 416, + 563, + 433 + ], + "spans": [ + { + "bbox": [ + 310, + 416, + 563, + 433 + ], + "type": "text", + "content": "[31] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. 2014. ArXiv:1411.1784." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 434, + 563, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 434, + 563, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 434, + 563, + 469 + ], + "type": "text", + "content": "[32] Yifang Men, Yiming Mao, Yuning Jiang, Wei-Ying Ma, and Zhouhui Lian. Controllable person image synthesis with attribute-decomposed gan. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pages 5084-5093, 2020." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 470, + 563, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 470, + 563, + 496 + ], + "spans": [ + { + "bbox": [ + 310, + 470, + 563, + 496 + ], + "type": "text", + "content": "[33] Yifan Liu, Zengchang Qin, Zhenbo Luo, and Hua Wang. Auto-painter: Cartoon image generation from sketch by using conditional generative adversarial networks. 2017. ArXiv:1705.01908." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 497, + 563, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 497, + 563, + 532 + ], + "spans": [ + { + "bbox": [ + 310, + 497, + 563, + 532 + ], + "type": "text", + "content": "[34] Yuanzheng Ci, Xinzhu Ma, Zhihui Wang, Haojie Li, and Zhongxuan Luo. User-guided deep anime line art colorization with conditional adversarial networks. In Proceedings of the 26th ACM International Conference on Multimedia, page 1536-1544, 2018." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "spans": [ + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "type": "text", + "content": "[35] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C Courville. Improved training of wasserstein gans. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "type": "text", + "content": "[36] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. In Proceedings of the Conference on Neural Information Processing Systems, page 405-415, 2017." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 594, + 563, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 594, + 563, + 631 + ], + "spans": [ + { + "bbox": [ + 310, + 594, + 563, + 631 + ], + "type": "text", + "content": "[37] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3408-3416, 2018." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 632, + 563, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 563, + 658 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 563, + 658 + ], + "type": "text", + "content": "[38] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proceedings of the Conference on Neural Information Processing Systems, 2020." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 658, + 563, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 658, + 563, + 684 + ], + "spans": [ + { + "bbox": [ + 310, + 658, + 563, + 684 + ], + "type": "text", + "content": "[39] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. 2020. ArXiv:2011.13456." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 685, + 563, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 685, + 563, + 711 + ], + "spans": [ + { + "bbox": [ + 310, + 685, + 563, + 711 + ], + "type": "text", + "content": "[40] Junyao Gao, Yanan Sun, Fei Shen, Xin Jiang, Zhening Xing, Kai Chen, and Cairong Zhao. Faceshot: Bring any character into life. 2025. ArXiv:2503.00740." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "type": "text", + "content": "[41] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. Proceedings of the Conference on Neural Information Processing Systems, 36:30146-30166, 2023." 
+ } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 586 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "text", + "content": "[42] Fei Shen, Xin Jiang, Xin He, Hu Ye, Cong Wang, Xiaoyu Du, Zechao Li, and Jinhui Tang. Imagdressing-v1: Customizable virtual dressing. 2024. ArXiv:2407.12705." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "type": "text", + "content": "[43] Ente Lin, Xujie Zhang, Fuwei Zhao, Yuxuan Luo, Xin Dong, Long Zeng, and Xiaodan Liang. Dreamfit: Garment-centric human generation via a lightweight anything-dressing encoder. 2024. ArXiv:2412.17644." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "type": "text", + "content": "[44] Weifeng Chen, Tao Gu, Yuhao Xu, and Arlene Chen. Magic clothing: Controllable garment-driven image synthesis. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6939-6948, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "spans": [ + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "type": "text", + "content": "[45] Yuhao Xu, Tao Gu, Weifeng Chen, and Chengcai Chen. Ootdiffusion: Outfitting fusion based latent diffusion for controllable virtual try-on. 2024. ArXiv:2403.01779." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 164, + 301, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 164, + 301, + 217 + ], + "spans": [ + { + "bbox": [ + 47, + 164, + 301, + 217 + ], + "type": "text", + "content": "[46] Xujie Zhang, Binbin Yang, Michael C Kampffmeyer, Wenqing Zhang, Shiyue Zhang, Guansong Lu, Liang Lin, Hang Xu, and Xiaodan Liang. Diffcloth: Diffusion based garment synthesis and manipulation via structural cross-modal semantic alignment. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23154-23163, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 218, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 218, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 218, + 301, + 236 + ], + "type": "text", + "content": "[47] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. 2022. ArXiv:2207.12598." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 236, + 301, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 255 + ], + "type": "text", + "content": "[48] Muhammad Hussain. Yolov5, yolov8 and yolov10: The go-to detectors for real-time vision, 2024. ArXiv:2407.02988." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 255, + 301, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 255, + 301, + 290 + ], + "spans": [ + { + "bbox": [ + 47, + 255, + 301, + 290 + ], + "type": "text", + "content": "[49] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond, 2023. ArXiv:2308.12966." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 290, + 301, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 301, + 326 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 301, + 326 + ], + "type": "text", + "content": "[50] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7915-7925, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 326, + 301, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 326, + 301, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 326, + 301, + 344 + ], + "type": "text", + "content": "[51] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. ArXiv:1512.03385." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "type": "text", + "content": "[52] J. MacQueen. Some methods for classification and analysis of multivariate observations. In Proceedings of the 5th Berkeley Symposium on Mathematical Statistics and Probability, pages 281-297, 1967." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 371, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 371, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 371, + 301, + 407 + ], + "type": "text", + "content": "[53] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Proceedings of the Conference on Neural Information Processing Systems, 30, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "type": "text", + "content": "[54] Kai Zeng, Zhou Wang, Anmin Zhang, Zhaohui Wang, and Wenjun Zhang. A color structural similarity index for image quality assessment. In Proceedings of the IEEE International Conference on Image Processing (ICIP), pages 660-664, 2014." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 442, + 301, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 442, + 301, + 478 + ], + "spans": [ + { + "bbox": [ + 47, + 442, + 301, + 478 + ], + "type": "text", + "content": "[55] Masato Fujitake. Rl-logo: Deep reinforcement learning localization for logo recognition. 
In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2830-2834. IEEE, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "type": "text", + "content": "[56] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 514, + 301, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 301, + 532 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 301, + 532 + ], + "type": "text", + "content": "[57] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. ArXiv:1711.05101." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 532, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 532, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 532, + 301, + 550 + ], + "type": "text", + "content": "[58] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models, 2022. ArXiv:2010.02502." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 550, + 301, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 586 + ], + "type": "text", + "content": "[59] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 255, + 33 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_content_list.json b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..004ac7d2aa10ec6d82df27ac3b135ba3d7e2326c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_content_list.json @@ -0,0 +1,6772 @@ +[ + { + "type": "text", + "text": "PerceptionLM: Open-Access Data and Models for Detailed Visual Understanding", + "text_level": 1, + "bbox": [ + 197, + 122, + 802, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jang Hyun Cho $^{1,2,\\ast,\\dagger}$ , Andrea Madotto $^{1,\\ast}$ , Effrosyni Mavroudi $^{1,\\ast}$ , Triantafyllos Afouras $^{1,\\ast}$ , Tushar Nagarajan $^{1,\\ast}$ , Muhammad Maaz $^{3,\\ast,\\dagger}$ , Yale Song $^{1,\\ast}$ , Tengyu Ma $^{1,\\ast}$ , Shuming Hu $^{1,\\ast}$ , Suyog Jain $^{1}$ , Miguel Martin $^{1}$ , Huiyu Wang $^{1}$ , Hanoona Rasheed $^{3,\\dagger}$ , Peize Sun $^{1}$ , Po-Yao Huang $^{1}$ , Daniel Bolya $^{1}$ , Nikhila Ravi $^{1}$ , Shashank Jain $^{4}$ , Tammy Stark 
$^{4}$ , Shane Moon $^{4}$ , Babak Damavandi $^{4}$ , Vivian Lee $^{1}$ , Andrew Westbury $^{1}$ , Salman Khan $^{3}$ , Philipp Krähenbuhl $^{2}$ , Piotr Dólar $^{1}$ , Lorenzo Torresani $^{1,\\star}$ , Kristen Grauman $^{1,2,\\star}$ , Christoph Feichtenhofer $^{1,\\star}$", + "bbox": [ + 183, + 205, + 834, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Meta FAIR $^{2}$ UT Austin $^{3}$ MBZUAI $^{4}$ Meta Reality Labs", + "bbox": [ + 184, + 316, + 604, + 334 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Joint first author †Work done during internships at Meta *Project lead", + "bbox": [ + 184, + 339, + 684, + 357 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 388, + 537, + 405 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vision-language models are integral to computer vision research, yet many high-performing models remain closed-source, obscuring their data, design and training recipe. The research community has responded by using distillation from black-box models to label training data, achieving strong benchmark results, at the cost of measurable scientific progress. However, without knowing the details of the teacher model and its data sources, scientific progress remains difficult to measure. In this paper, we study building a Perception Language Model (PLM) in a fully open and reproducible framework for transparent research in image and video understanding. We analyze standard training pipelines without distillation from proprietary models and explore large-scale synthetic data to identify critical data gaps, particularly in detailed video understanding. To bridge these gaps, we release 2.8M human-labeled instances of fine-grained video question-answer pairs and spatio-temporally grounded video captions. 
Additionally, we introduce PLM-VideoBench, a suite for evaluating challenging video understanding tasks focusing on the ability to reason about \"what\", \"where\", \"when\", and \"how\" of a video. We make our work fully reproducible by providing data, training recipes, code & models.", + "bbox": [ + 228, + 420, + 767, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "GitHub: https://github.com/facebookresearch/perception_models", + "bbox": [ + 230, + 647, + 666, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 681, + 313, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vision-language models (VLMs) are now a key part of computer vision research and are widely used in both academia and industry. Many of the strongest performing VLMs are closed-source, meaning their design, training methods, and the data they use are not publicly shared. To stay competitive, the research community has started to catch up to the proprietary models by using a straightforward approach — distillation from black-box models [1, 2, 3, 4, 5], where proprietary models are directly used to label training data [3, 6, 7], directly leading to strong benchmark results.", + "bbox": [ + 169, + 712, + 826, + 797 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although distillation will unlock strong performance, there are two main issues for basic research. First, it makes it hard to track scientific progress. Specifically, we cannot tell if better results on benchmarks are due to advances in model design or training, or simply because the proprietary teacher models were trained on the evaluation sets of widely used benchmarks or internal data collected to resemble them — this information is not available. Second, the heavy reliance on distillation leads to a fundamental misunderstanding of the effectiveness of current methods for training VLMs from scratch. 
Several key questions remain unanswered, including the significance of each training stage,", + "bbox": [ + 169, + 801, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13180v3 [cs.CV] 23 Jul 2025", + "bbox": [ + 22, + 281, + 57, + 715 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Meta", + "bbox": [ + 171, + 922, + 248, + 938 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg", + "image_caption": [ + "Figure 1: We introduce the largest collection of manually annotated fine-grained activity QA and spatiotemporal captioning data (left panel). Together with this data, we train and release PLM —open and fully reproducible models to facilitate research in vision-language model training (right panel)." + ], + "image_footnote": [], + "bbox": [ + 173, + 88, + 823, + 194 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the influence of synthetic data, the data gaps that the research community should prioritize, and which of these gaps are currently being artificially addressed by distillation from proprietary models.", + "bbox": [ + 169, + 260, + 826, + 289 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To better understand these challenges, we develop the Perception Language Model (PLM), a fully open and reproducible model for transparent research in image and video understanding (Fig. 1 right). PLM consists of a vision encoder with a small scale (<8B parameters) LLM decoder. We start by an analysis of standard training pipelines with available data, without any proprietary model distillation. 
We investigate large-scale synthetic data and establish key scaling laws to identify critical data gaps that limit video understanding performance, especially for spatio-temporal reasoning and fine-grained understanding tasks.", + "bbox": [ + 169, + 294, + 823, + 391 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To fill these gaps, we create 2.8M high-quality human-labeled instances of fine-grained video QA and spatio-temporally grounded video captions, see Fig. 1. This release is nearly an order of magnitude larger than the largest existing video datasets of each type [8, 9]. Our model, dataset and benchmark push the boundaries of video understanding, and provide a foundation for reproducible and transparent training and evaluation of VLM research. Across 40 image and video benchmarks, we achieve comparable performance with existing state-of-the-art open-weight models (e.g., InternVL2.5 [10]), without distilling from proprietary models, and greatly outperform fully open models (i.e., Molmo [11]).", + "bbox": [ + 169, + 398, + 826, + 494 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 518, + 321, + 534 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Vision-Language Models. Building on the strengths of large language models (LLMs), several vision-language models (VLMs) have recently been proposed for image understanding [1, 12, 13, 14, 15, 16, 17, 18, 19], video understanding [20, 21, 22, 23, 24, 25, 26, 27] and joint understanding of both images and videos [10, 28, 29, 30]. These works employ several modeling advancements such as dynamic high resolution inputs [12], adaptive token compression [25, 31], and multimodal positional embeddings [30].", + "bbox": [ + 169, + 553, + 826, + 636 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Open source, open data VLMs. Training data is a key component in developing powerful VLMs. 
Many existing approaches train on proprietary data that is not released to the community [32, 33, 34, 35, 36] or on data generated using proprietary models (e.g., GPT4o) [3], effectively distilling the closed models. Doing so make measuring scientific progress difficult and limits research on how to train VLMs ground-up. Molmo [11] proposes a class of open-data models, however, they are image VLMs trained on relatively small-scale data, limiting their performance as our experiments will show.", + "bbox": [ + 169, + 656, + 826, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "VLM Benchmarks. Several benchmarks have been proposed to assess the capabilities of VLMs. Popular image benchmarks cover broad perception and reasoning [37, 38, 39, 40, 41, 42, 43, 44, 19, 45, 46, 47, 48] as well as capabilities like image captioning [49, 50, 51], document/diagram understanding [52, 53, 54, 55, 56, 57, 58, 59, 60, 61], mathematical reasoning [62, 63, 64], visual grounding [65, 66] and hallucination [67, 68]. Popular video benchmarks cover video question answering [20, 8, 69, 70, 71, 72, 73, 74, 75, 76, 77, 22, 78, 79, 80], video captioning [81, 82, 83, 84, 85, 86, 87], and hallucination in videos [88, 89]. Many of these video benchmarks remain image-centric — they have questions that can be answered with a few frames. Video-centric reasoning in benchmarks has been relatively neglected with benchmarks proposed only recently for long video understanding [90, 91, 92, 93, 94, 95, 96, 97, 98] and fine-grained, temporal reasoning [99, 100, 101, 102, 103]. 
We introduce PLM-VideoBench—a benchmark suite aimed at the core, video", + "bbox": [ + 169, + 758, + 826, + 910 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "centric capabilities that current benchmarks neglect, namely fine-grained activity understanding and spatio-temporally grounded reasoning.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 PLM: Overview", + "text_level": 1, + "bbox": [ + 171, + 147, + 343, + 165 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we overview the model, training stages and training data involved in the development of PLM. Please refer to Fig. 8 for a detailed overview and Appendix A for additional details.", + "bbox": [ + 169, + 184, + 823, + 213 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Model. PLM consists of a vision encoder and language decoder, where a pre-trained Perception Encoder (PE) [104] is connected to the Llama 3 [13] language decoder (1B, 3B, or 8B parameters) with a 2-layer MLP projector. We use PE L/14 for Llama3.2 1B and 3B, and PE G/14 for Llama3.1 8B. For image input, PLM incorporates dynamic tiling to support high resolution images for up to 36 tiles of $448^{2}$ resolution, where each tile undergoes $2 \\times 2$ average input, PLM uses 32 frames at $448^{2}$ resolution, v dimensions of each video frame.", + "bbox": [ + 169, + 234, + 486, + 402 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/d030c186456dc1b4dfd47039cf3c8be6b9cf516ecc55f24b4303978981f96e51.jpg", + "table_caption": [], + "table_footnote": [ + "Table 1: Summary of three training stages to train PLM. See Appendix Table 7 and Table 8 for data splits.", + "pooling to compress the visual tokens. For video where the same pooling is applied across the spatial" + ], + "table_body": "
Stage 1 WarmupStage 2 MidtrainingStage 3 SFT
ModalityImageImage + VideoImage + Video
Data1M Synthetic72M Mix19M Mix
TrainingProjectileFullFull
Downsampling-2 × 22 × 2
Tiles/Frames1/-16/1636/32
", + "bbox": [ + 498, + 238, + 821, + 329 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data. The data used to train the PLM consists of synthetic and human-annotated samples. Synthetic data enhances the general capabilities of PLM, while human-annotated data broadens these capabilities to encompass more complex tasks. Synthetic data is sourced from a diverse array of image and video datasets, covering fundamental VLM capabilities such as OCR, chart/document/diagram understanding, image/video captioning, and visual question answering.", + "bbox": [ + 169, + 425, + 826, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We design data engines for each data modality (e.g., natural images, charts, documents, figures, egocentric and exocentric videos) to efficiently scale up, creating $\\sim 66.1\\mathrm{M}$ samples ( $\\S 4$ ). The synthetic data can be noisy, but is available at large scale; on the other hand, human-annotated data provides rich, high-quality supervision for image and video tasks. Here, we combine existing human annotations of diverse image and video sources, with our own collected human-annotated data, specifically geared towards fine-grained video understanding and spatio-temporally grounded reasoning ( $\\S 5$ ).", + "bbox": [ + 169, + 501, + 825, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training stages. PLM trains in three stages:", + "bbox": [ + 171, + 607, + 478, + 623 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Projector warm-up.** First, we freeze the vision encoder and LLM and only train the vision projector on a small amount of synthetic image data. This warms-up the newly initialized parameters in the projector and improves stability for later stages. We use $1M$ images from SA-1B [105] with the image captions generated by our data engine (§4).", + "2. Large-scale midtraining with synthetic data. 
Next, we train PLM on diverse domains of images and videos at scale, using a maximum of 16 tiles for images and 16 frames for videos. PLM sees around 64.7M images and videos with synthetically generated captions and question-answer pairs. We employ our data engine to scale up synthetic data generation (see §4).", + "3. Supervised fine-tuning with human-annotated data. Finally, we train PLM with higher image resolutions and more video frames, using up to 36 tiles for images and 32 frames for videos. In this stage, we tackle more challenging video tasks, including fine-grained QA and spatiotemporally grounded reasoning." + ], + "bbox": [ + 169, + 628, + 823, + 877 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/e4479baec3e3978f3bfd89cfb5cc2991b1e8a1f5648dde908b20e0c4df1874d0.jpg", + "table_caption": [], + "table_footnote": [ + "Table 2: Summary of the data mix for training PLM. See Table 9 for the full data blend.", + "Table 1 shows an overview of our training setup for each stage. Appendix A.1 provides the complete training recipe for each stage, including hyperparameters and data sources." + ], + "table_body": "
SamplesTypeStage
Our Human-annotated (2.87M)
PLM-FGQA2.4MFine-grained3
PLM-STC476.2KR(D)Cap + RTL3
Our Synthetic (66.1M)
Natural Images15.9MCaption1,2,3
Charts & Documents31.9MCaption2,3
Videos Mix17.5MMix.2,3
Ego4D880KCap. + QA2,3
Existing Open Source (6.52M)
Image (92 datasets)5.6MDiverse2,3
Video (27 datasets)920KDiverse2,3
", + "bbox": [ + 563, + 693, + 821, + 834 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 Synthetic Data Generation and Scaling", + "text_level": 1, + "bbox": [ + 171, + 89, + 532, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The predominant paradigm for VLM training is to generate synthetic annotations as cheap alternatives to human-labeled data [1, 106, 30, 107, 10, 11, 15]. Although seemingly promising to get the best results on benchmarks, the majority of such data shared in the community is derived from proprietary models. This trend makes it hard to decouple scientific progress from proprietary distillation impact. In this section, we explore the efficacy of the current paradigm for VLM training in a transparent manner. We design our data engine entirely from open-source models and scale the synthetic data generation to around 66.1M samples of images and videos. We establish the scaling laws of training from synthetic data on standard VLM tasks, including image, OCR/document, and video tasks.", + "bbox": [ + 169, + 119, + 826, + 232 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Data Engine", + "text_level": 1, + "bbox": [ + 171, + 247, + 302, + 262 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our data engine is designed to target base capabilities of VLMs for image and video understanding.", + "bbox": [ + 169, + 272, + 826, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Data Engine. We generate short and long captions, as well as question-answer pairs, for natural images and those containing documents, diagrams, and text recognizable by optical character recognition (OCR). We prompt openly accessible Llama 3 [13] model to produce factual, detailed image captions while minimizing hallucinations. 
To create informative question-answer pairs, we utilize OCR data, captions, and other metadata, which are fed into the prompt of a text-only LLM.", + "bbox": [ + 169, + 301, + 823, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video Data Engine. For videos, we first use an off-the-shelf scene detector [108] to extract video clips of approximately 30 seconds duration. Then, we extract the keyframes and generate frame-level captions using Llama 3, and video captions using our initial PLM trained with Stage 1 and Stage 3 data as shown in Table 2. We then employ an LLM to refine the frame-level and video captions by incorporating existing video metadata (e.g., action labels, time tags) into a cohesive, detailed video-level caption. Similarly, we generate question-answer pairs from the video-level captions.", + "bbox": [ + 169, + 385, + 823, + 469 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The resulting synthetic data is large-scale and diverse – 66.1M samples carefully curated from a variety of image and video sources including natural images, in-the-wild text, chart, figures, documents, egocentric and exocentric videos. Additional details are in Appendix J.", + "bbox": [ + 169, + 474, + 826, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Scaling Laws with Synthetic Data", + "text_level": 1, + "bbox": [ + 171, + 532, + 447, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We examine scaling properties of our synthetic data under controlled setup and establish scaling laws.", + "bbox": [ + 169, + 558, + 826, + 574 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg", + "image_caption": [ + "Figure 2: Synthetic Scaling Plots. Relationship between Average Error across benchmarks and training compute (in floating-point operations) for various PLM models. 
We report average errors across Video QA tasks [75, 72, 90, 8, 70, 71], OCR QA tasks [109, 53, 56, 57], and Natural Images tasks [45, 110, 111, 68, 40, 112]. Model's performance using only human-labeled data subset are reported (No Syst.) as well as the actual power-law fit of each subcategory." + ], + "image_footnote": [], + "bbox": [ + 181, + 585, + 818, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Setup. To establish power-law relationship between compute and validation-set errors of downstream benchmarks, we vary the scale of synthetic data, language model decoders (1B, 3B, and 8B), vision encoders (300M and 2B), and resolution/number of frames. For each configuration, we train a model with the 66.1M synthetic data from our data engine and 6.5M publicly available human-labeled data, following stage 2 training described in §3. At every 2M samples, we evaluate PLM on three categories of downstream benchmarks (VideoQA, OCR QA, Natural QA), constructed from 20 vision-language understanding benchmarks that provide a comprehensive and general evaluation of", + "bbox": [ + 169, + 814, + 828, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "multi-modal large language models. We compute the pareto frontier of these data points and fit a power law relationship: $\\mathrm{Err.} = (\\beta \\times \\mathrm{FLOP})^{\\alpha}$ and compare the exponents $\\alpha$ of the power function as scalability of each setup, where a smaller $\\alpha$ implies better scaling.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Scaling with decoder size. Fig. 2 shows the scaling behavior of PLM across various LLM sizes. We show validation-set errors and training compute on a logarithmic scale, with the black linear line representing the power-law relationship between them. 
Different colors (green, turquoise, and blue) represent different language model scales (1B, 3B, 8B) while keeping the vision encoder size constant at 300M. As described in the setup section above, we show the power law fit of the pareto frontier in each benchmark category. We also show the results of PLM only trained on 4M human-labeled datasets as baselines, denoted with horizontal lines of each color. The gap from the horizontal line to the data point marks the impact of the synthetic data. Interestingly, all three categories of benchmarks demonstrate clear power-law relationship between compute and average benchmark errors, with the power law exponent $(\\alpha)$ of $-0.15, -0.20,$ and $-0.11$ for Video QA, OCR QA, and Natural Image QA, respectively. In Appendix B, we provide more details and extend the analysis to (1) scaling the encoder size, and (2) scaling the image resolution and video frames.", + "bbox": [ + 169, + 148, + 826, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Limitation of synthetic data. In Fig. 3, we evaluate stage 2 on an extended set of video benchmarks. Specifically, we show the result of 7 challenging video tasks on fine-grained activity understanding [97, 100, 89, 101, 99], temporal grounding [113] and long-video reasoning [92]. Unlike generic, high-level understanding (e.g., \"what is happening in this video\"), the \"challenging\" tasks require a thorough understanding of video in space and time, and fine-grained semantic details. As shown, the challenging video tasks (\"HardQA\" in lavender, plum, magenta) show a poor scaling trend $(-0.03)$ compared to general video QA $(-0.15)$ . The stark difference between the two power law fits shows that scaling synthetic data is only effective for established, base tasks. Extending VLMs to", + "bbox": [ + 169, + 332, + 552, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "these more challenging, complex tasks still remain unsolved. 
Next, we address this challenge with high-quality human-annotated video data, PLM-FGQA and PLM-STC.", + "bbox": [ + 169, + 526, + 823, + 554 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg", + "image_caption": [ + "Figure 3: Limitation of synthetic data. Challenging video tasks (HardQA [97, 100, 89, 101, 99, 113, 92]) do not scale well with synthetic data." + ], + "image_footnote": [], + "bbox": [ + 563, + 335, + 823, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Human-annotated High Quality Data", + "text_level": 1, + "bbox": [ + 171, + 575, + 517, + 593 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 3, the current paradigm with synthetic data has run out of steam. Training from tens of millions of synthetically annotated data hardly improves our model on new, challenging video benchmarks. Beyond standard VLM tasks, these benchmarks focus on advanced capabilities such as fine-grained activity understanding, temporal grounding, and long video understanding. Perhaps, the knowledge that these benchmarks examine is simply not present in the initial training set of our data engine nor in existing human-annotated data. Our community lacks high quality datasets for detailed visual understanding to start from, that covers what, where, when, and how of activities in video. To address this gap, we introduce two large-scale, human-annotated video datasets:", + "bbox": [ + 169, + 607, + 823, + 719 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PLM-FGQA is a fine-grained video QA dataset collected by asking human annotators to watch a short video segment and answer model-generated questions which focus on \"what\" activities humans perform and \"how\" they perform these activities. 
Question types include fine-grained recognition (action and object), fine-grained temporal perception (direction of movements, repetition counts, hand pose etc.), and fine-grained spatial understanding (object locations and spatial relationships). We use a multi-stage data engine to first extract video segments with salient actions from untrimmed videos through temporal clustering and shot-detection. Next, we generate questions and answers using either a text-only LLM or an early version of PLM. Finally, we refine the answers by asking humans to verify or replace them if they are incorrect, resulting in a high-quality QA pairs.", + "bbox": [ + 169, + 724, + 826, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Overall, we collect 2.4M question answer pairs from various open-access video datasets [114, 115, 116, 117, 118, 83] spanning over 780k unique video clips from diverse domains (e.g., cooking, DIY, carpentry, automotive and bike repair) and viewpoints (egocentric and third-person); refer to Fig. 13 for domain statistics. This is nearly 8 times larger than the size of the largest existing human-annotated", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg", + "image_caption": [ + "Fine-grained QA (FGQA)" + ], + "image_footnote": [], + "bbox": [ + 178, + 107, + 385, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Question", + "text_level": 1, + "bbox": [ + 184, + 169, + 225, + 176 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "How does the person hold the sandpaper?", + "Answer: With their right hand, between the right thumb on one side, fingers on the other side." 
+ ], + "bbox": [ + 184, + 176, + 377, + 203 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Question", + "text_level": 1, + "bbox": [ + 184, + 209, + 225, + 217 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In which direction is the person moving the sandpaper? Answer", + "bbox": [ + 184, + 217, + 380, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "From the bottom of the baluster to the top in a vertical, oscillating motion.", + "bbox": [ + 187, + 229, + 379, + 243 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 108, + 465, + 162 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 465, + 108, + 532, + 162 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 534, + 108, + 602, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Question", + "text_level": 1, + "bbox": [ + 401, + 169, + 441, + 176 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "How many chakli snacks does the person flip?", + "Answer\nThe person flips three chakki snacks with a long metal skewer." + ], + "bbox": [ + 401, + 176, + 589, + 202 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Question", + "text_level": 1, + "bbox": [ + 401, + 209, + 441, + 215 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Where is the metal skewer located at the beginning? Answer", + "Resting on top of the pan, which is positioned on the left burner of the portable stove." 
+ ], + "bbox": [ + 401, + 217, + 589, + 243 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg", + "image_caption": [ + "Figure 4: Overview PLM-FGQA. Examples of question-answer pairs from PLM-FGQA, focusing on fine-grained human activity understanding. PLM-FGQA is approximately 8 times larger than the largest existing human-annotated video QA dataset and addresses a wide range of fine-grained question types that are scarce in existing video QA datasets, such as ones that cover direction of movement, object states, locations and spatial relations." + ], + "image_footnote": [], + "bbox": [ + 633, + 108, + 821, + 250 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "video QA dataset in the community [91]. Moreover, as illustrated by the breakdown of question types1 in Fig. 4 (top-right), PLM-FGQA contains a large number of annotations about fine-grained details that have been largely missing in existing training video QA datasets [119, 69, 71, 76, 20, 120, 121, 122, 123]. Please refer to Table 16 for comparison with existing datasets Table 17 for dataset examples and Appendix G for further details.", + "bbox": [ + 169, + 344, + 826, + 414 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PLM-STC is a spatio-temporal video captioning dataset that offers detailed activity descriptions for each video. It includes timestamps (\"when\") of each activity and focuses on specific subjects identified by a masklet (\"where\"). We employ a two-stage annotation process to improve efficiency in collecting PLM-STC. In the first stage, annotators select interesting objects that exhibit significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. For segments where the subject is out of frame, we automatically supplement \"out of frame\" caption. 
In the second stage, a separate set of annotators write temporally localized descriptions of the highlighted subject focusing on the changes in action across time in relation to the whole video.", + "bbox": [ + 169, + 420, + 823, + 544 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg", + "image_caption": [ + "[0,11] Out of frame." + ], + "image_footnote": [], + "bbox": [ + 179, + 575, + 385, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[12, 67] The person wearing a jacket is running on a snow covered ground. She stops and turns to look the other person.", + "bbox": [ + 186, + 672, + 380, + 696 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg", + "image_caption": [ + "Spatio-temporal Captions (STC)" + ], + "image_footnote": [], + "bbox": [ + 397, + 575, + 602, + 631 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[0, 19] The man moves gracefully, using his hand gestures that closely resemble a dance in most of his actions.", + "bbox": [ + 401, + 645, + 596, + 667 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 671, + 542, + 678 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[20, 31] The person moves from right to left.", + "bbox": [ + 403, + 683, + 578, + 691 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 575, + 818, + 631 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, 
+ 635, + 637, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 635, + 656, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 635, + 676, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 635, + 689, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 635, + 702, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 635, + 718, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 635, + 733, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 738, + 635, + 750, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 635, + 774, + 642 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 635, + 797, + 642 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[0, 81] A little girl moves back as a beluga whale approaches her face.", + "bbox": [ + 617, + 645, + 812, + 660 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 672, + 638, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 672, + 656, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 672, + 681, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 672, + 707, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 672, + 733, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 738, + 672, + 750, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 672, + 774, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 672, + 797, + 681 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg", + "image_caption": [ + "Figure 5: Overview of PLM-STC. Examples of spatio-temporally grounded captions from PLM-STC, the first dataset to associate each caption both with a temporal interval as well as a high-fps sequence of segmentation masks of the subject - i.e., masklets (compared to just a temporal interval or a sparse sequence of bounding boxes)." + ], + "image_footnote": [], + "bbox": [ + 619, + 696, + 638, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 696, + 656, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 696, + 678, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 696, + 689, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 696, + 704, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 696, + 718, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 723, + 
696, + 733, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 738, + 696, + 750, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 764, + 696, + 774, + 703 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 696, + 797, + 703 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, we collect 194.2K spatio-temporal captions as the first existing large-scale dense video-region captioning dataset. We convert these spatio-temporal captions into three tasks for training: RCap (194.2K): Given the video region and timestamps, the model generates a caption; RTLoc (194.2K): Given the video region and caption, the model localizes the action; and RDCap (122.3K): Given the video region, the model generates dense, localized captions. In total, we construct $194.2\\mathrm{K} + 194.2\\mathrm{K}$ $+122.3\\mathrm{K} = 522.7\\mathrm{K}$ samples, of which $476.2\\mathrm{K}$ are used for training and the rest for constructing", + "bbox": [ + 169, + 800, + 823, + 883 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1 obtained with LLM-based tagging.", + "bbox": [ + 192, + 897, + 406, + 911 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PLM-VideoBench. Please refer to Fig. 
5 for dataset examples, Table 19 for comparison with existing datasets, Table 20 for dataset statistics and Appendix H for further details.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1 PLM-VideoBench", + "text_level": 1, + "bbox": [ + 171, + 133, + 343, + 148 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our high-quality human-annotated data offers VLMs to train for broader range of capabilities for holistic video understanding. However, existing video benchmarks are not adequately equipped to evaluate these. To this end, we introduce PLM-VideoBench, a novel benchmark focusing on specific activities (what) and their execution details (how) within spatio-temporal contexts (where and when).", + "bbox": [ + 169, + 160, + 826, + 217 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg", + "image_caption": [ + "Figure 6: PLM-Video Dataset includes fine-grained video QA (FGQA), open-ended QA in videos recorded using smart glasses (SGQA), Spatio-Temporal Captions (STC) post-processed into video region captioning (RCap), video region temporal localization (RTLoc) and video region dense captioning (RDCap) tasks." + ], + "image_footnote": [], + "bbox": [ + 173, + 224, + 823, + 390 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fine-Grained Question Answering (FGQA). In this task, a model must answer a multiple-choice question (MCQ) that probes nuanced, fine-grained activity understanding (e.g., painting \"vertically\" vs. \"horizontally\" in Fig. 6, first). We report multi-binary accuracy (MBAcc) [99] where each question is split into multiple binary choice questions. Our test set consists of 4,371 question-answer pairs. 
For more information, including statistics on video clips, segment duration, question types, and benchmark construction, see Table 18 and §G.2.", + "bbox": [ + 169, + 454, + 826, + 539 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Smart Glasses Question Answering (SGQA). In this task, a model must answer open-ended questions about activities and objects visible in an egocentric video stream recorded by a smart-glasses device (see Fig. 6, second). The questions are designed to simulate real-world scenarios where a user would ask for assistance from their smart glasses. We manually collect the videos using commercially available smart glasses, providing a completely new, unique dataset that reflects modern use-cases such as online AI video assistance and activity coaching. For evaluation, we use LLM-judge accuracy with an open-access model (Llama3.3 70B). The test set consists of 665 human-annotated question-answer pairs. See Appendix I for more details.", + "bbox": [ + 169, + 551, + 826, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Video Region Captioning (RCap). In this task, a model must generate a detailed description of an event involving a subject of interest in the video. Given a region masklet and a specified time interval, the model is required to output a caption that accurately describes the event occurring within that interval. Compared to traditional video captioning [125, 83, 84] where the aim is to generate a video-level caption, the goal is to generate a region-level caption tied to a specific subject (e.g., a person, object or animal) (see Fig. 6, third). The test set contains 10,060 human-annotated instances and we report LLM-judge accuracy with Llama3.3 70B. See Appendix C.3 for details.", + "bbox": [ + 169, + 676, + 825, + 776 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Region Temporal Localization (RTLoc). 
In this task, a model must identify the precise time interval within the video when the specified event takes place for the given subject. Given a video, a region masklet and a text description of the event, the model is required to output the start and end timestamps that correspond to the occurrence of the event (see Fig. 6 fourth). Notably, this task is the inverse of RCap — instead of generating the caption, the model receives it as input and generates the corresponding time interval. We filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap. We report average recall@1 over IoU thresholds (0.3, 0.5, 0.7, 0.9). See Appendix C.3 for details.", + "bbox": [ + 169, + 787, + 825, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Region Dense Video Captioning (RDCap). In this task, a model must generate a detailed description of all events involving a specific subject of interest (e.g., person, animal, or object) in a video. Given a video and a region masklet, the model must produce a sequence of (start, end, caption) tuples that cover the entire duration of the video, including periods when the subject is not visible (see Fig. 6, last). This task is a composition of RTLoc and RCap, requiring the model to produce both temporal windows for events as well as captions directly from the video. The test set contains 2,620 samples and we report the SODA score [126] which uses an LLM judge. 
See Appendix C.3 for details.", + "bbox": [ + 169, + 90, + 826, + 203 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Experiments", + "text_level": 1, + "bbox": [ + 171, + 220, + 313, + 238 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We first overview the baselines and evaluation setting (§6.1). We then compare benchmark results of PLMs with the baselines on a broad collection of image (§6.2) and video (§6.3) tasks as well as on our PLM-VideoBench (§6.4). Finally, we provide analyses on data and model ablations (§6.5).", + "bbox": [ + 169, + 251, + 823, + 292 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.1 Setup", + "text_level": 1, + "bbox": [ + 171, + 301, + 253, + 316 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare PLMs against the following two classes of baselines:", + "bbox": [ + 169, + 327, + 604, + 342 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Proprietary models such as GPT-4o [33] (gpt-4o-2024-11-20), Gemini-Pro 1.5 [34] and Gemini-Flash 2.0 [35]. We use API calls to evaluate these models.", + "- Open-access models such as Molmo-O [11], LLaVA-OneVision [28], Qwen2.5-VL [106] and InternVL2.5 [10] — state-of-the-art open-access models, for which model scale, architecture and inference code are available. We use the official inference code for all models." + ], + "bbox": [ + 171, + 345, + 823, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inference protocol. For mask inputs in PLM-VideoBench, we overlay a colored box on the video frames to specify the regions. We report validation set performance unless specified (in brackets) under the benchmark name. Metrics marked with $\\dagger$ use LLM as a judge. 
Complete implementation details including inference hyper-parameters, task prompts, judge prompts and proprietary model evaluation protocol can be found in Appendix C.4.", + "bbox": [ + 169, + 429, + 823, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.2 Image Benchmark Results", + "text_level": 1, + "bbox": [ + 171, + 508, + 397, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We evaluate PLM on a total of 20 image benchmarks. Charts, Diagrams and Documents: answer questions that require parsing images of documents and diagrams; Image Captioning: generate a short/detailed caption, Perception and Reasoning: answer questions of varying difficulty about objects, actions, functional correspondence, multi-view reasoning, spatial layout etc. and Hallucination: evaluate robustness to hallucinated details. More details are in Appendix C.1.", + "bbox": [ + 169, + 527, + 826, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3 shows our results. Overall, PLM shows strong performance on a wide spectrum of image benchmarks with solely from open-access data with a white-box data engine. Additionally, we report", + "bbox": [ + 169, + 603, + 823, + 633 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/62e5bca9a4bdf80bc199e8421fe81eaf8f45fa81c53e4ab87f0912b80879803e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelCharts, Diagrams and DocumentsPerception and ReasoningHard PerceptionHalluc.
DocVQA (test) acc [53]CharQA acc [54]TextVQA acc [52]InfoQA (test) acc [56]AL2D (n/o mask) acc [55]OCR-Bench acc [57]MMMU (rval) acc [37]VQA2 (rval) acc [111]OK-VQA acc [39]VizWiz acc [40]SEED (image) acc [58]BLINK (multi-image) acc [44]CV-Bench acc [19]RealWorldQA acc [45]VSR acc [127]POPE acc [68]
GPT-4o [33]92.8*85.7*75.380.7*94.2*81070.7*-63.9-77.1*68.0*72.573.978.087.2*
Gemini 1.5 Pro [35]94.084.274.881.0*95.783063.2-63.9-77.859.881.066.376.188.2*
Gemini 2.0 Flash [35]93.084.880.281.094.079269.9*-57.8-77.064.482.371.974.8-
1B scale
Qwen2VL-2B [30]90.1*75.380.365.5*84.6*809*41.1*80.059.767.472.944.4*17.362.6*73.087.2
InternVL2.5-1B [10]84.8*75.9*72.0*56.0*77.8*785*40.9*72.251.547.471.342.442.158.365.490.2
PLM-1B90.778.682.163.084.980734.881.761.059.776.346.873.867.168.888.4
3B scale
Qwen2.5 VL-3B [106]93.9*83.179.3*77.1*90.2797*53.1*80.863.271.973.147.6*54.465.4*78.588.2
InternVL2.5-4B [10]91.6*84.0*79.372.1*90.5*828*52.3*80.964.061.875.650.8*55.964.680.091.0
PLM-3B93.884.384.374.690.983041.284.366.864.078.555.481.472.480.488.7
8B scale
Molmo-7B-O [11]90.8*80.4*80.4*70.0*90.7*-39.3*85.3*-----67.5*--
LLaVA-OV-7B [28]86.780.077.368.890.165648.983.569.663.476.449.475.066.778.189.2
Qwen2.5VL-7B [106]95.7*87.3*84.9*82.6*93.0864*58.6*70.161.073.573.256.4*11.969.880.387.2
InternVL2.5-8B [10]93.0*84.8*79.377.6*92.8*82356.0*80.669.264.377.654.8*53.970.1*80.090.6*
PLM-8B94.685.586.580.992.787046.185.669.667.079.356.081.375.082.889.9
", + "bbox": [ + 173, + 650, + 823, + 895 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Image benchmarks. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature, and the remaining are reproduced using official code.", + "bbox": [ + 169, + 900, + 823, + 926 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/52ea6fb7910ba74270ecfac5de8092c609865cd4806fe73566a5fb07f843d9bf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelVCap.Video QAFine-grained Video QAT.Loc.Halluc.
DREAM-1K F/F [86]MVBench acc [70]NExT-QA acc [69]PerceptionTest (test) acc [71]STAR acc [72]Video-MME acc [75]ActivityNet-QA acc [76]EgoSchema (test) acc [90]TemporalBench MBA acc [99]TOMATO MBO acc [100]MotionBench (dev) acc [101]TempCompass (MCC) acc [102]CG-Bench (clue) acc [97]Charades-STA mIoU [113]VideoHallucer overall acc [88]EventHallusion (binary) acc [89]
Proprietary
GPT-4o [33]-64.6*79.1-70.471.9*-72.2*38.5*37.7*55.974.558.3*38.656.491.9*
Gemini 1.5 Pro [35]-60.5*81.665.9-75.0*56.7*71.2*34.732.056.175.650.1*34.256.080.9
Gemini 2.0 Flash [35]-60.781.9--70.3*-71.5*27.632.856.176.947.0*29.860.181.6
1B scale
Qwen2VL-2B [30]26.863.2*76.453.9*67.355.6*38.427.013.125.746.962.342.80.334.959.9
InternVL2.5-1B [10]27.764.874.359.473.050.3*60.755.727.725.045.056.440.90.831.038.9
PLM-1B34.370.180.372.783.749.262.560.418.225.552.264.643.655.249.279.5
3B scale
Qwen2.5 VL-3B [106]20.367.076.866.9*63.061.5*59.264.8*17.223.549.263.045.738.8*45.253.5
InternVL2.5-4B [10]29.271.782.567.977.262.3*64.166.623.727.452.765.252.08.449.666.3
PLM-3B37.474.783.479.384.854.966.266.923.430.960.469.347.257.755.576.5
8B scale
LLaVA-OV-7B [28]28.057.181.058.166.057.760.545.419.527.653.767.841.212.134.761.1
Qwen2.5VL-7B [106]23.369.6*80.070.5*68.165.5*63.765.0*24.524.651.171.7*49.843.6*50.161.1
InternVL2.5-8B [10]28.572.685.568.9*77.664.2*66.166.2*24.329.453.568.3*53.114.357.160.2
PLM-8B35.977.184.182.784.958.367.368.828.333.261.472.746.458.657.777.3
", + "bbox": [ + 178, + 80, + 823, + 325 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Image Grounding task results on RefCOCO/+/g [65] datasets in Appendix Table 14, and show that PLM outperforms both specialist models as well as the VLM baselines in all model scales.", + "bbox": [ + 169, + 368, + 823, + 396 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.3 Video Benchmark Results", + "text_level": 1, + "bbox": [ + 171, + 409, + 393, + 422 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We evaluate PLM on a total of 25 video benchmarks. We divide these into the following categories. Video Captioning: generate a short caption for a video, or a dense description of all events; Short video QA: answer a question about a short video (few seconds to a minute), either by selecting from a list of options, or providing a free-form answer; Long video QA: answer a question as before, about a much longer video (minutes to hours); Fine-grained QA: answer detailed questions about spatial location, motion, temporal information etc.; and Hallucination: evaluate the robustness of video models to hallucinated details about objects and events.", + "bbox": [ + 169, + 429, + 826, + 526 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4 shows video captioning, video QA, fine-grained video QA, and video hallucination results. We achieve strong results on widely adopted benchmarks, despite only using open-access data mix free from proprietary model artifacts, outperforming both the open-access and proprietary models.", + "bbox": [ + 169, + 532, + 825, + 575 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Further, we achieve competitive performance on the majority of challenging benchmarks, such as EgoSchema (68.8 %), MotionBench (61.4 %), TOMATO (33.2 %), TempCompass (72.7 %), TemporalBench (28.3 &), Charades-STA (58.6 %), and more. 
All our model scales show strong performance against both proprietary models as well as open-access baselines of same scale.", + "bbox": [ + 169, + 580, + 823, + 638 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Lastly, we also show that PLMs at all scale greatly outperform existing approaches on captioning tasks and hallucination detection tasks, owing to our focus on detailed, fine-grained spatio-temporal annotations in our human-annotated data collection.", + "bbox": [ + 169, + 643, + 823, + 685 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.4 PLM-VideoBench Results", + "text_level": 1, + "bbox": [ + 171, + 698, + 393, + 710 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We report the result on our proposed benchmark PLM-VideoBench from §5.1 in Table 5. We evaluate our PLM as well as (proprietary and open-access) baselines. In addition, we provide human performance of each subtask in the first row. The results show a significant gap between the baselines and PLM. Proprietary baselines and open-source baselines alike perform reasonably on FGQA tasks, though still 6.5 points lower than PLM (61.2 vs 67.7). On SGQA, where the video sources and the question-answer pairs are unseen to all models, PLM performs reasonably well, yet 2.1 points short from open-access best (InternVL2.5) and far from the best proprietary model", + "bbox": [ + 169, + 718, + 501, + 912 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/88f1750edc60d7fca24ce4b6116dcb56895c0a14ae4539375ffb52be7846b390.jpg", + "table_caption": [ + "Table 4: Video benchmark results. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature and the remaining are reproduced using official code." + ], + "table_footnote": [], + "table_body": "
ModelFGQA MBAccSGQA acc†RDCap SODA†RCap score†RTLoc meanRAvg.
Human perf.90.967.966.653.967.873.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
Open-access
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
", + "bbox": [ + 516, + 696, + 821, + 868 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 5: PLM-VideoBench results. We evaluate PLM against baselines and report breakdowns. We report human performance in the first row.", + "bbox": [ + 509, + 875, + 823, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(GPT-4o). On spatio-temporal tasks (RDCap, DCap, RTLoc), open source baselines are unable to perform grounded reasoning and default to repeating the same caption for every time interval. Proprietary models perform reasonably well, yet far from the human performance. In all sub-tasks of PLM-VideoBench, PLM shows competitive performance compared to proprietary and open-access baselines. Results for all model scales are in Appendix D.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Note that the human performance varies based on the nature of the task and evaluation metrics. For example, FGQA human scores are naturally higher than RCap because the task is structured (select the correct option vs. open-ended) and the metric is objective (accuracy vs. LLM-judge accuracy).", + "bbox": [ + 169, + 167, + 823, + 209 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6.5 Ablation Studies", + "text_level": 1, + "bbox": [ + 171, + 224, + 328, + 239 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Setup. We perform an ablation study to assess the importance of each of our proposed data, both synthetic and human-annotated. We start with PLM 3B after stage 2 training, and finetune on 4M short image and video SFT data mix ${}^{2}$ for the data ablation. 
We evaluate and report average video benchmark performance across five categories — video captioning, short video QA, fine-grained QA, and video hallucination, as well as spatial and temporal tasks, PLM-VideoBench and three image categories — image OCR, image captioning, and image perception. Full details are in Appendix A.3.", + "bbox": [ + 169, + 250, + 826, + 335 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2bcd6ae87dc81ee9d74711aff2bc23d783e6471cd8e696a0f03e4b894bb0b5b6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PLM-Synth.PLM-STCPLM-FGQATotal AveragePLM-VideoBenchVideo TasksImage Tasks
PLM-FGQAMBaccPLM-SGQAacc†3 metric avg.Fine-Grained QA5 benchmark avg.Video Cap.Dream 1KVideo QA5 benchmark avg.Video Hallu.2 benchmark avg.Spatial&Temp.4 benchmark avg.Image OCR6 benchmark avg.Image Cap.3 benchmark avg.Image Rec.5 benchmark avg.
XXX48.539.734.46.642.224.067.564.950.676.064.363.3
XX54.349.835.914.748.829.973.273.356.184.065.965.5
X57.949.936.242.148.632.373.974.262.983.867.565.0
X56.762.943.215.250.130.474.176.358.383.764.065.6
61.263.644.042.250.234.374.676.364.383.774.265.4
", + "bbox": [ + 184, + 348, + 648, + 470 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 6: Ablation. We show the impact of individual data components in PLM training. For this ablation, we use a reduced the SFT datamix consists of 4M open-access image and video data. Results are aggregated validation-set performance over selected benchmarks in each category of tasks, details in Appendix A.3.", + "bbox": [ + 169, + 473, + 663, + 525 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg", + "image_caption": [ + "Figure 7: HardQA improves with PLM data." + ], + "image_footnote": [], + "bbox": [ + 689, + 364, + 805, + 489 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Discussion. First, we observe that stage 2 synthetic data training boosts model performance across the board. Moreover, adding our PLM-STC data further improves a variety of benchmarks, including PLM-STC (+27.4 points), video captioning (+2.4 points), and most importantly, spatial and temporal tasks (+6.8 points). Adding our PLM-FGQA data improves a distinct set of categories for fine-grained activity understanding; PLM-FGQA (+13.1 points), PLM-SGQA (+7.3 points), Fine-grained video tasks (+1.3 points), video hallucination tasks (+3.0 points), and spatial and temporal tasks (+2.2 points). Using our human-annotated data altogether results in the best performance overall. 
Further in Fig.7, we show that our human-annotated data improves upon HardQA [97, 100, 89, 101, 99, 113, 92], effectively addressing the limitations of synthetic data discussed in §4.2.", + "bbox": [ + 169, + 547, + 826, + 672 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 691, + 302, + 707 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work presents Perception Language Model (PLM), a fully-reproducible vision-language model to transparently tackle visual perception tasks without distillation of private black-box models. We trained PLM using data from existing open-access datasets and synthetic samples generated by our data engine. We identified gaps in detailed video understanding capabilities that cannot be filled with synthetic data. In response, we collected 2.8M human-labels for fine-grained video question answering and spatio-temporally grounded captioning, and created a new benchmark, PLM-VideoBench, to evaluate these capabilities. We hope our open dataset, benchmark, and models will foster transparent research in visual perception.", + "bbox": [ + 169, + 723, + 826, + 834 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "23.8M datamix: TextQA 500K, Image QA 2.8M, and Video QA 500K. Each detail can be found in Tab. 
9.", + "bbox": [ + 189, + 896, + 818, + 911 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 426, + 87, + 573, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table of Contents", + "text_level": 1, + "bbox": [ + 171, + 128, + 354, + 147 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A PLM Training Details 12", + "bbox": [ + 210, + 152, + 787, + 169 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A.1 PLM Training Setting 12", + "A.2 PLM Training Datamix 13", + "A.3 Ablation Experiment Details 14" + ], + "bbox": [ + 233, + 170, + 785, + 214 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B Synthetic Scaling Experiments 14", + "bbox": [ + 210, + 224, + 785, + 241 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C VLM Benchmark Details 16", + "bbox": [ + 210, + 252, + 785, + 266 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "C.1 Image Benchmarks 16", + "C.2 Video Benchmarks 17", + "C.3 PLM-VideoBench 17", + "C.4 Evaluation Protocols 18" + ], + "bbox": [ + 233, + 268, + 785, + 327 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "D Additional PLM-VideoBench Results 19", + "E Baseline Implementation Details 19" + ], + "bbox": [ + 210, + 339, + 785, + 380 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "F Additional Results 20", + "bbox": [ + 210, + 392, + 785, + 406 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "F.1 Comparison with LLaMA-3V 20", + "F.2 Image Captioning 20", + "F.3 Image Grounding 21", + "F.4 Long Video Understanding 21" + ], + "bbox": [ + 233, + 407, + 785, + 467 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "G PLM-FGQA: Fine-grained QA 22", + 
"bbox": [ + 210, + 479, + 785, + 493 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "G.1 Annotation process: Data Engine 22", + "G.2 FGQA PLM-VideoBench Construction 27" + ], + "bbox": [ + 233, + 494, + 785, + 523 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "H PLM-STC Details 28", + "bbox": [ + 210, + 536, + 785, + 549 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "H.1 Annotation Process 28", + "H.2 PLM-STC Benchmark 30" + ], + "bbox": [ + 233, + 551, + 785, + 579 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "I Smart Glasses Data 30", + "bbox": [ + 210, + 592, + 785, + 606 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "I.1 Data collection and annotation 30", + "I.2 SGQA Benchmark 31" + ], + "bbox": [ + 233, + 608, + 785, + 636 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "J Synthetic Data Engine 31", + "K Qualitative Results 35", + "L Limitations and Future Work 39", + "M Broader Impact 39" + ], + "bbox": [ + 210, + 648, + 785, + 742 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A PLM Training Details", + "text_level": 1, + "bbox": [ + 171, + 89, + 395, + 107 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg", + "image_caption": [ + "Figure 8: The figure provides an overview of the datasets used in the paper. PLM is trained with $47.8M$ synthetic image and $18.4M$ synthetic video, and $2.9M$ human-labeled video samples. 
Our data enables PLM to perform a variety of tasks, including standard tasks like Image, Multi-image, and Video QA, as well as new video tasks such as Fine-grained QA (FGQA), Region Temporal Localization (RTLoc), Region Captioning (RCap), and Region Detailed Captioning (RDCap)." + ], + "image_footnote": [], + "bbox": [ + 173, + 127, + 823, + 378 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we describe the training details of PLM. In §A.1 we describe exact details of training setting such as hyper-parameters and implementation details. In §A.2 we describe our datamix for both synthetically generated and human-annotated parts.", + "bbox": [ + 169, + 464, + 823, + 508 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.1 PLM Training Setting", + "text_level": 1, + "bbox": [ + 171, + 522, + 372, + 537 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For all three stages, we use AdamW optimizer [128] with weight decay of 0.05 and use FSDP [129] with FlashAttention2 [130] for overall implementation based on PyTorch [131].", + "bbox": [ + 169, + 547, + 823, + 578 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Stage 1 training. In stage 1, we use a subset of SA-1B [105] paired with detailed captions generated by our data engine (§4.1). We use total 1M samples to train PLM with next token prediction loss, with vision encoder and LLM parameters frozen. This stage is commonly known as warm-up stage. We use learning rate $1 \\times 10^{-4}$ for all model scale with global batch size of 512 and $448 \\times 448$ resolution. We use the Perception Encoder [104] L/14 variant for the 1B and 3B PLM models, and the G/14 variant for the 8B PLM model.", + "bbox": [ + 169, + 590, + 826, + 674 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Stage 2 training. In Stage 2, we train on a total of 72.5M samples. 
Of these, 66M consist of images and videos with synthetically generated annotations produced by our data engine. The remaining 6.5M samples are a subset of human-annotated images and videos from open-source datasets, which are included in our final datamix described in §A.2. We train with global batch size of 2048, learning rate of $4 \\times 10^{-5}$ , weight decay of 0.05 for the full set of parameters (vision encoder, projector, and LLM). For both image and video input, we use $448 \\times 448$ resolution for each tile/frame, which effectively generate 1024 vision tokens. We apply $2 \\times 2$ spatial average pooling to reduce this to 256. We use dynamic tiling with a thumbnail to support any resolution and aspect ratio, similar to prior work [12], and uniform sampling of video frames after preprocessing the videos to 1 fps. We set the maximum number of tiles/frames to be 16, which results in maximum of $(16 + 1) \\times 256 = 4352$ and $16 \\times 256 = 4096$ vision tokens respectively for images and videos. We train the model with a sequence length of 6144 allowing a maximum of 2048 tokens for the text modality.", + "bbox": [ + 169, + 688, + 826, + 857 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Stage 3 training. In stage 3, we use total of 19.1M high-quality datamix spanning over multiple image, video, and text modalities. We describe this datamix in §A.2. In this stage, we use global batch size of 1024, learning rate of $1 \\times 10^{-5}$ for 8B and $4 \\times 10^{-5}$ for 1B and 3B PLM models. We", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "train the full set of parameters for all scales. 
Similar to stage 2, we adapt dynamic tiling and uniform frame sampling for up to 36 tiles for image and 32 frames for video, with $2 \\times 2$ spatial average pooling, which generates $(36 + 1) \\times 256 = 9472$ vision tokens for image and $32 \\times 256 = 8192$ vision tokens for video. For all modalities, we use 11264 maximum training sequence length.", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 PLM Training Datamix", + "text_level": 1, + "bbox": [ + 171, + 179, + 382, + 195 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 9 presents the full data mix used across all training stages apart from our manually collected data in §5. This contains annotations from existing public datasets as well as synthetically generated data (see §4). We filter and include a wide variety of existing datasets spanning across images (captioning, QA, grounding), videos (captioning, QA, temporal localization, region captioning and dense captioning) and text-only datasets to preserve the text-instruction following capabilities of our model. Most importantly, we filter out every dataset that contains annotations generated by proprietary models. Table 7 and Table 8 shows the exact number of samples for each datasets in Stage 2 and Stage 3 respectively. Marjory of the data in stage 2 are synthetic, with a focus on captioning samples, since they carry the dense information about the image or video. In stage 3, we have one third of the data, mostly focusing on human annotated samples, covering a large variety of tasks.", + "bbox": [ + 169, + 210, + 826, + 349 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/ab9bd49c00e0ac2c61fb94363fcc5cec51c4b1cbfe090cb6a415f58a3eb577ea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetNum SamplesTypeDatasetNum SamplesType
Image SyntheticImage Synthetic
PDFAcc (QA) [132]12MQAPDFAcc (QA) [132]2MQA
PDFAcc (Cap) [132]12MCap.ArxivCap [134]1.5MCap./QA
UCSF [133]6MQASA1B [105]800KCap.
ArxivCap [134]1.8MCap./QAObject365 [135]300KCap.
SA1B [105]10MCap.OpenImages [136]300KCap.
Object365 [135]3.5MCap.DocVQA [53]100KQA
OpenImages [136]1.8MCap.InfographicVQA [56]50KQA
DocVQA [53]50KQAPixmoCap [11]500KCap
InfographicVQA [56]20KQAVideo Synthetic
PixmoCap [11]600KCapYT-1B (QA) [137]300KMCQA
Video SyntheticEgo4D (Cap.) [115]180KCap.
YT-1B (Cap.) [137]14MCap.Ego4D (QA) [115]700KQA
YT-1B (QA) [137]3MMCQASpoken Moments [138]449KCap.
Ego4D (Cap.) [115]180KCap.Charades [139]8KCap.
Ego4D (QA) [115]700KQAKinetics710 [121]40KCap.
Spoken Moments [138]449KCap.DiDeMo [140]7.5KCap.
Charades [139]8KCap.Text Synthetic
Kinetics710 [121]40KCap.NaturalReasoning [141]1MQA
DiDeMo [140]7.5KCap.Human Annotated
Text SyntheticImage QA [9]2.8MQA
NaturalReasoning [141]1MQAImage Cap [9]36KQA
Human AnnotatedImage Grnd. [9]1.4MQA
Image QA [9]2.8MQAImage Misc. [9]1.4MQA
Video QA [9]570KQAVideo QA [9]570KQA
Video TL [9]16KTemp. Loc.Video Cap. [9]315KQA
Video Dense Cap. [9]10KDense Cap.Video TL [9]16KTL
Text QA [9]2MMixVideo Dense Cap. [9]10KDCap.
Total72.5MVideo Region Captioning [9]15KCap.
", + "bbox": [ + 173, + 375, + 808, + 792 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 7: PLM Stage 2 training data mix.", + "bbox": [ + 171, + 794, + 434, + 809 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 8: PLM Stage 3 training data mix.", + "bbox": [ + 519, + 862, + 785, + 877 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/cd1b284ae8d1f3f30f8088b18b59622d25aa293d59cf4db1f85924f860087e5e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSize
DVQA [142]222222
PlotQA [143]157070
MapQA [144]42761
OCRVQA [145]167646
Localized Narratives [146]199998
FigureQA [147]119999
Hateful Memes [148]9713
CLEVR [149]73181
CLEVR v.0 [149]70000
IconQA [150]116514
TextVQA [112]21953
GeomVerse [151]11162
RobuT (wikisql) [152]80757
WebSight [153]10000
Visual7W [154]15961
TallyQA [155]100050
RobuT (WTQ) [152]42495
DaTik [156]47974
CocoQA [157]46287
ChartQA [109]27395
VQAv2 [111]82772
Chart2Text [158]35946
VisText [159]35995
FinQA [160]5276
DocVQA [53]12089
STVQA [161]18684
TAT-QA [162]2199
RenderedText [163]10435
RAVEN [164]31418
IAM [165]7549
A-OKVQA [39]17720
TabMWP [166]45439
CocoQA [157]9009
TextCaps [167]21953
Screen2Words [168]16713
VSR [169]2157
TQA [170]9742
RobuT (SQA) [152]12769
VisualMRC [171]3027
ScienceQA [61]9947
VQA-RAD [172]313
InfographicVQA [56]2118
Hitab [173]4995
AI2D [55]4863
Inter-GPS [174]2555
diagram_image_to_text [175]595
MIMIC-IT (CGD) [176]70539
MultiHiert [177]15233
NLVR2 [178]136799
RAVEN (Multi-image) [164]56081
SpotTheDiff [179]19340
", + "bbox": [ + 173, + 88, + 316, + 426 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/516c83e5d6908944dc93b5e074a65757fe20d9b8f53f088e3a30c9295209e87e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSize
STAR [72]3032
NExT-QA [69]3870
VISION [180]9900
FlinstonesSV [181]22341
ImageCoDe [182]16594
VizWiz [40]4900
MIT-States (State Coherence) [183]1900
MIT-States (Prop. Coherence) [183]1900
WebQA [184]9338
Birds-to-Words [185]14281
AESOP [186]6915
RecipeQA (Img. Coherence) [187]8699
CLEVR-Change [188]3885
IEEdit [189]3456
ChartQA [109]45820
DocVQA [53]69562
InfographicVQA [56]32661
TextVQA [112]69170
TextCaps [167]21324
VisualMRC [171]24456
WTQ [190]16885
HME100k [191]74492
chrome_writing [163]8825
OK-VQA [110]27536
GeometrySk [174]4802
VQA-RAD [172]1793
Total2796145
Image Cap.
DatasetSize
DOCCI [192]13362
DCI [193]7599
Altogether [194]15166
Total36127
Image Misc.
DatasetSize
AI2d [55]12413
COCO cap. [49]414113
GQA-Balanced [195]943000
Total1369526
", + "bbox": [ + 323, + 88, + 493, + 428 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/b83cdccde56ac234bb0f2e3c8ad905aa0dd7ba0225a99cc30d9278d0de75d545.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Grounding
DatasetSize
VisualGenome [66]154792
Flickr Entities [196]296332
DCI (Region Caption) [193]304912
RefCOCO/+/g [197]212923
VCR [60]855577
Total1398690
Image Synth.
DatasetSize
DocVQA [53]50170
InfographicVQA [56]21660
PDFAcc (Cap.) [132]12024670
PDFAcc (QA) [132]12024670
UCSF [133]5953490
ArxivCap [134]1859680
SA1B [105]9834573
Object365 [135]3484584
OpenImages [136]1740864
PixmoCap [11]584650
Total47579011
Video QA
DatasetSize
EgoQA [119]7813
NExT-QA (instruct) [69]34114
NExT-QA (MCQ) [69]34114
PerceptionTest [71]2403
ActivityNetQA [76]23530
VideoInstruct (human) [20]25803
CLEVRER (MC) [120]42620
CLEVRER (QA) [120]40000
Kinetics710 [121]39949
SSv2 (classification) [122]40000
VidLN [123]43126
VidLN (QA) [123]75090
How2QA [8]45731
STAR [72]35297
Memento [198]40060
Memento-MultImage [198]40060
Total569710
Video Cap.
DatasetSize
VATEX (en caption) [84]259910
Charades (caption) [139]11593
ActivityNet (captions) [125]33375
YouCook2 [83]10337
Total315215
", + "bbox": [ + 503, + 88, + 653, + 436 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/701e00966d2585d20f52c85fd4a84ee611176d8dbb3ce8ee2398f029906d406d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Video Temporal Loc.
DatasetSize
HiREST [199]7919
Charades [139]7566
DiDeMo [140]435
Total15920
Video Region Captioning
DatasetSize
HC-STVG [200]10131
VidLN (UVO subset) [123]5296
Total15427
Video Dense Cap.
DatasetSize
ActivityNet [125]8859
YouCook2 [83]1039
Total9898
Video Synth.
DatasetSize
Spoken Moments [138]449044
Charades [139]7919
Kinetics710 [121]39949
DiDeMo [140]7566
Ego4D (Cap.) [115]183029
Ego4D (QA) [115]703935
YT-1B (Cap.) [137]14792983
YT-1B (QA) [137]3383670
Total19568095
Text-QA
DatasetSize
no robots [201]9485
MathQA [202]29837
LIMA [203]1030
GSM8k (socratic) [204]7473
GSM8k [204]7473
FLAN [205]156050
Dolly15k [206]15011
Magpie Pro (MT) [207]300000
Magpie Pro [207]300000
Total2056359
", + "bbox": [ + 666, + 88, + 812, + 401 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 9: PLM training datamix. Our mix includes synthetic and manually annotated data across a combination of image data (QA, captioning, OCR, Visual grounding), video data (captioning, grounded captioning, dense captioning, temporal localization) and text-only data. Importantly, all data is publicly accessible, and not generated by proprietary models.", + "bbox": [ + 169, + 443, + 823, + 500 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.3 Ablation Experiment Details", + "text_level": 1, + "bbox": [ + 171, + 527, + 415, + 541 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We provide additional details about the ablation experiment in §6.5. We report benchmark average scores across 5 categories, along with the average across all of them. We select a representative set of benchmarks from the full set of image and video benchmarks in §6.2 and §6.3 that report comparable scores so the average results are meaningful. For Video captioning we select Dream 1K and report the LLM-judge score with Llama3.3 70B as judge. for Short Video QA, and Finegrained QA, we select benchmarks that report MCQ accuracy (and exclude open-ended QA). For Hallucination, we include both benchmarks. For Spatial and Temporal tasks, we select BLINK, CVBench, VSR, and Charades-STA. For Image Perception, we choose SEED, MMMU, VQAv2, OK-VQA, and VizWiz. We train the ablation setup of SFT with the exactly matching hyperparameters as our final run; only difference is the size of the SFT datamix.", + "bbox": [ + 169, + 551, + 826, + 691 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B Synthetic Scaling Experiments", + "text_level": 1, + "bbox": [ + 171, + 710, + 465, + 729 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section we provide additional results to the synthetic scaling experiments in §4.2. 
We report aggregate benchmark accuracies across three categories — Video QA, OCR QA and Image QA — by selecting representative benchmarks from each category. For VideoQA, these are STAR [72], EgoSchema [90], MVBench [70], VideoMME [75] and PerceptionTest [71]; For OCR QA, these are ChartQA [109], DocVQA [53], InfographicsQA [56], TextVQA [112] and OCRBench [57]; and for Natural Image QA, these are RealworldQA [45], OKVQA [110], VQAv2 [111], and VizWiz [40].", + "bbox": [ + 169, + 743, + 826, + 828 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Scaling with encoder size. After investigating the impact of the LLM decoder in Fig. 2, we examine the impact of increasing the vision encoder size from 300M (PE Large) to 2B (PE Giant) for each language model scale next. In Fig. 9, we overlay the new power-law with the 2B vision encoder (black dashed) line onto the 300M (red dashed) line. Notably, we find that the larger vision encoder $(300\\mathrm{M}\\rightarrow 2\\mathrm{B})$ leads to greater scaling trend on video QA benchmarks. Quantitatively, the power law", + "bbox": [ + 169, + 842, + 823, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg", + "image_caption": [ + "Figure 9: Scaling with encoder size. Scaling trends of PE-G vs. PE-L vision encoders. Larger encoders scale better in Video QA tasks while similar scaling in OCR and Natural QA is seen." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 95, + 413, + 236 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 95, + 614, + 231 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 95, + 816, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "fit has improved from $-0.15$ to $-0.19$ . The two lines intersect around 8B scale with PE-G, proving that 8B and larger PLM will benefit more with larger vision encoder. We use PE-L for 1B and 3B LLM scale and PE-G for 8B scale by default.", + "bbox": [ + 169, + 311, + 823, + 354 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg", + "image_caption": [ + "Figure 10: Scaling with input size. Scaling trends of training with 16 tiles/frames vs. 8 tiles/frames. Higher input size scales better in Video QA and OCR QA tasks while similar trend is seen for Natural QA." + ], + "image_footnote": [], + "bbox": [ + 173, + 385, + 411, + 526 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 387, + 614, + 522 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 387, + 816, + 512 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Scaling with input size. In Fig. 10, we show the impact of increasing the input size to VLM through higher image resolution and more video frames. 
In this setting, each scale of PLM trains with dynamic tiling for image input and uniform sampling for video input with maximum 8 or 16 tiles/frames per sample. In each plot, the average error of PLM trained with 16 tiles/frames are plotted. All models use $2 \\times 2$ spatial average pooling before input to LLM, and each tile/frame has $448 \\times 448$ resolution. Similar to Fig. 2, we show power law fit with a black dashed line, and compare to 8 tiles/frames training denoted with red dashed line. Notably, we find out that on Video QA and OCR QA benchmarks, PLM shows better scalability with training with higher input size. This means with the same FLOP counts at $10^{13}$ , training with 16 frames makes 2.0 points of metric error lower than 8 frames counterpart (32.2 vs 30.2). Similar trends are observed with OCR QA going from 8 tiles max. to 16 tiles max. Notably, higher resolution did not make a difference for Natural QA tasks. We chose the 16 max-tiles and frames to be our final training setting for stage 2 PLM.", + "bbox": [ + 169, + 614, + 826, + 781 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In Fig. 11, we show the breakdown of the scaling trend shown in §4.2. “H” stands for human only (i.e., no synthetic) baseline. From the breakdown, the most notable point is the the scalability in OCR, Chart, Document QA tasks. In each benchmark, synthetic data makes more than 10 points of improvement on every model scale, compared to “no synthetic” baselines. Moreover, there is no sign of saturation; the performance will most likely improve with more synthetic data. We hypothesize that OCR, Chart, Document QA tasks reduce to “translation” task — a set of pixels has one-to-one mapping to text space. Remaining tasks exhibit clean power-law relationship between metric error and FLOPs. 
The last plot shows scaling trend on average over all benchmarks, which shows a close power-law relationship.", + "bbox": [ + 169, + 786, + 826, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 95, + 344, + 209 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 95, + 500, + 209 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 95, + 658, + 209 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 95, + 815, + 209 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 215, + 344, + 329 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 217, + 500, + 329 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 217, + 656, + 329 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": 
"images/a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 217, + 813, + 329 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 337, + 344, + 449 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 337, + 500, + 449 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg", + "image_caption": [ + "Power Law Fit" + ], + "image_footnote": [], + "bbox": [ + 506, + 337, + 658, + 449 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 337, + 813, + 449 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg", + "image_caption": [ + "Figure 11: Synthetic Scaling Plots. Relationship between Average Error and training compute (in floating-point operations) for various 1B, 3B, 8B PLM with L14 vision encoder. Each plot reports the individual error in VideoMME [75], STAR [72], EgoSchema [90], How2QA [8], MVBench [70], PerceptionTest [71], ChartQA [109], DocVQA [53], InfoVQA [56], OCRBench [57], RealworldQA [45], OKVQA [110], VQAv2 [111], VizWiz [40], and TextVQA [112]. Finally, we report Avg. All, which average over all the metrics." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 458, + 344, + 580 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 458, + 500, + 580 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 458, + 656, + 580 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C VLM Benchmark Details", + "text_level": 1, + "bbox": [ + 171, + 664, + 421, + 679 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this section, we provide details about all the image and video benchmarks considered in §6 including composition and evaluation metrics for image benchmarks (§C.1), video benchmarks (§C.2) and our PLM-VideoBench (§C.3. We also describe evaluation protocol for all these benchmarks including inference parameters and prompts (§C.4). Pointers to evaluation code are linked where available.", + "bbox": [ + 169, + 696, + 823, + 765 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 Image Benchmarks", + "text_level": 1, + "bbox": [ + 171, + 784, + 352, + 797 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Image captioning We evaluate on single image captioning and grounded image captioning benchmarks like COCO [49], nocaps [50] and Flickr [51]. We report CIDEr as the evaluation metric.", + "bbox": [ + 169, + 810, + 825, + 839 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Perception and reasoning We evaluate on broad, general purpose VQA benchmarks like MMMU [37], VQAv2 [111], MMBench [38], OK-VQA [39], VizWiz [40] as well as hard perception benchmarks like BLINK [44], CV-Bench [19], RealWorldQA [45], and VSR [127]. 
For all MCQ benchmarks, we report accuracy of selecting the correct option.", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Charts, diagrams and documents We evaluate on benchmarks for reasoning over various types of charts, graphs, diagrams, infographics etc. Specifically, DocVQA [53], ChartQA [54], TextVQA [52], InfographicsVQA [56], AI2D [55], OCRBench [57], and SEED [58]. We report accuracy of selecting the correct option.", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Image Hallucination Finally, we evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as HallusionBench [67] and POPE [68]. For HallusionBench we report the $aAcc$ metric (code) which accounts for correctness and consistency using an LLM judge.", + "bbox": [ + 169, + 162, + 826, + 205 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 Video Benchmarks", + "text_level": 1, + "bbox": [ + 171, + 220, + 349, + 234 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Video captioning We evaluate on short-video captioning benchmarks, namely YouCook2 [83] and VATEX [84] as well as recent detailed video captioning benchmarks — DREAM-1k [86] and AuroraCap-VDC [87]. For YouCook2 and VATEX, we report CIDEr score [208]. For DREAM-1k we report AutoDQ F1-score (code) and for AuroraCap-VDC we report the VDC accuracy (code) following the author's proposed metric.", + "bbox": [ + 169, + 244, + 823, + 316 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Short video QA We evaluate on multiple-choice (MCQ) benchmarks such as How2QA [8], NNextQA [69], PerceptionTest [71], STAR [72], TGIF-QA [73], TVQA [74], Video-MME [75] and TVBench [80]. We report accuracy of selecting the correct option. 
We also evaluate on open-ended question answering benchmarks (w/o options) such as ActivityNet-QA [76] (code), MMBenchVideo [79] (code) and VCGBench-Diverse [22]. We report LLM-judge scores/accuracies for these benchmarks. For VCGBench-Diverse, we report the average of 5 LLM-judge scores (code).", + "bbox": [ + 169, + 330, + 826, + 415 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Long video QA We evaluate on popular long-video benchmarks such as EgoSchema [90], LVBench [92], LongVideoBench [94] and MLVU [96]. We report accuracy of selecting the correct option.", + "bbox": [ + 169, + 429, + 825, + 472 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Fine-grained video QA We evaluate on benchmarks for fine-grained spatial, temporal and detail reasoning in videos such as TemporalBench [99], TOMATO [100], MotionBench [101], TempCompass [102] and CG-Bench [97]. We report accuracy of selecting the correct option. For Temporal-Bench, we report the multi-binary accuracy (MBAcc) (code) proposed by the authors to reduce bias in evaluation.", + "bbox": [ + 169, + 486, + 825, + 555 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Hallucination We evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as VideoHallucer [88] and EventHallusion [89]. We report accuracy of selecting the correct option.", + "bbox": [ + 169, + 571, + 823, + 613 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3 PLM-VideoBench", + "text_level": 1, + "bbox": [ + 171, + 628, + 344, + 643 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We evaluate on our suite of benchmarks for fine-grained and spatio-temporal reasoning in videos. These include:", + "bbox": [ + 169, + 655, + 826, + 681 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Fine-grained QA (FGQA) We report multi-binary accuracy (MBAcc) following prior work [99]. 
In short, this entails presenting the model multiple independent, binary-choice questions about the same video (in our case, three questions) and requiring the model to gets all of them correct, to count towards accuracy. This sets a higher bar for models, and combats bias in multiple-choice question benchmarks that prior work identifies.", + "bbox": [ + 169, + 698, + 825, + 768 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "SmartGlasses-QA (SGQA) We report LLM-judge accuracy of the predicted answer compared to the ground truth answer. We follow existing LLM judge prompts from ActivityNetQA (code). The prompt is repeated below for completeness.", + "bbox": [ + 169, + 782, + 823, + 825 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Video Region Captioning (PLM-RCap) We use an LLM-judge to generate the similarity scores between predicted and ground truth captions. The prompt is below.", + "bbox": [ + 169, + 839, + 823, + 869 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "**Dense Video Region Captioning (PLM-RDCap)** We adapt the SODA metric [126] from dense video captioning literature for this task. 
To compute this metric, we use the same LLM-judge from", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "above to generate the pairwise similarity scores between predicted and ground truth captions, which is then fed to the standard metric computation routine.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Region Temporal Localization (PLM-RTLoc) We report standard temporal localization metrics, namely Mean Recall@1, averaged over a range of IoU thresholds [0.3, 0.5, 0.7, 0.9].", + "bbox": [ + 169, + 138, + 826, + 167 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.4 Evaluation Protocols", + "text_level": 1, + "bbox": [ + 171, + 186, + 362, + 202 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Common evaluation protocol. For video benchmark evaluations, we sample 32 frames uniformly from the full video unless otherwise specified. For uniformity and consistency across benchmarks, we implement all LLM-judge evaluations using LLama3.3-70B-Instruct [13], following LLM judge prompts from popular evaluation frameworks [209, 210] where available. Outputs from all models are generated via greedy sampling (temperature 0).", + "bbox": [ + 169, + 215, + 826, + 286 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "SG-QA judge prompt", + "text_level": 1, + "bbox": [ + 181, + 297, + 318, + 311 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. 
Here's how you can accomplish the task:", + "bbox": [ + 179, + 315, + 815, + 366 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "##INSTRUCTIONS:", + "text_level": 1, + "bbox": [ + 181, + 378, + 292, + 388 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Focus on the meaningful match between the predicted answer and the correct answer.", + "- Consider synonyms or paraphrases as valid matches.", + "- Evaluate the correctness of the prediction compared to the answer." + ], + "bbox": [ + 178, + 391, + 795, + 429 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Please evaluate the following video-based question-answer pair:", + "bbox": [ + 179, + 441, + 645, + 454 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Question: [question]", + "bbox": [ + 181, + 454, + 333, + 465 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Correct Answer: [target]", + "bbox": [ + 181, + 467, + 361, + 479 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Predicted Answer: [candidate]", + "bbox": [ + 181, + 479, + 398, + 491 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. 
For example, your response should look like this: {\"pred\": \"yes\", \"score\": 4.8}.", + "bbox": [ + 179, + 491, + 815, + 580 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "PLM-RCap judge prompt", + "text_level": 1, + "bbox": [ + 181, + 595, + 339, + 609 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Your task is to compare a given pair of captions and provide a single score indicating how correct the pred is compared to GT, on a scale from 0 to 10. Focus on meaning and context, not exact word matches. Penalize missing and incorrect information, with lower scores for more significant errors. High scores require accurate conveyance of all key GT information. Respond with only the score, starting your response with the number and including no additional text. Output format: [score].", + "bbox": [ + 179, + 612, + 815, + 688 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "PLM-VideoBench inference prompts. Table 10 contains example inference prompt examples for each PLM-VideoBench task. Note that some variation exists between instances in the benchmark. For example, for RCap a prompt may be \"What is happening to the subject in the region highlighted by the red rectangle ...\" instead of \"Give a detailed description of the events occurring in the region marked by the red rectangle ...\" however they convey the same underlying instruction and information.", + "bbox": [ + 169, + 710, + 823, + 781 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proprietary models like GPT-4o and Gemini require more careful prompting to ensure that the output formatting is respected. 
For example, we append instructions to prevent model hallucinations (e.g., \"You must use these frames to answer the question; do not rely on any external knowledge or commonsense\"), to prevent refusals to answer (e.g., \"Even if the information in these separate frames is not enough to answer the question, please try your best to guess an answer which you think would be the most possible one based on the question. Do not generate answers such as not possible to determine\") and in-context examples to help guide the model towards the correct output format. Model- and benchmark-specific inference prompts will be released along with our code for full reproducibility.", + "bbox": [ + 169, + 786, + 826, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/6462db27df34dca5202d51ff30b4e6d53ccde5c5c7a6e1bbdb6382312ad5451a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TaskPrompt
FGQAQuestion: [question] \\n Options: \\n (A) [option1] \\n (B) [option2] \\n Only give the best option.
SGQAThe following question is asked by the camera wearer at the end of the video. Provide a detailed answer even if unsure. Try to answer in around 20-30 words. Now answer the following question based on the video content: [question]
RDCapCreate a dense caption of the subject's actions within the red rectangles, including action frames ids and brief descriptions. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video.
RCapGive a detailed description of the events occurring in the region marked by the red rectangle within frames ([start frame], [end frame]) in this 32 frame video
RTLocGiven the region marked by the red rectangle in the video, please provide the start and end frame of when '[event]' happens. Use the format (start, end), where start and end are frame numbers between 0 and 31 in this 32 frame video.
", + "bbox": [ + 173, + 88, + 823, + 327 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D Additional PLM-VideoBench Results", + "text_level": 1, + "bbox": [ + 171, + 388, + 519, + 405 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We present benchmarking results across all model scales (1B, 3B, 8B) in Table 11, to supplement the 8B model results in the main paper (Table 5). Our approach consistently outperforms baselines across all scales, including proprietary models whose model scale is unknown.", + "bbox": [ + 169, + 421, + 823, + 464 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/1828408ddca6d94bb42085ebace965210f63ef6d5aaefc0c5f8fd1a9b62002e5.jpg", + "table_caption": [ + "Table 10: PLM-VideoBench task prompts. Items in square brackets are placeholders filled in for each benchmark instance." + ], + "table_footnote": [], + "table_body": "
ModelFGOAMBaccSGQAAcc†RDCAPSOA‡RCapscore†RTLocmeanRAvg
Human perf.90.967.966.653.967.870.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
1B scale
Qwen2VL-2B [30]39.038.50.918.110.829.1
InternVL2-1B [10]35.828.90.317.22.723.8
InternVL2.5-1B [10]42.339.66.723.61.630.8
PLM-1B57.640.950.340.957.749.4
3B scale
Qwen2.5 VL-3B [106]43.745.10.317.213.933.1
InternVL2-4B [10]43.241.70.519.99.630.3
InternVL2.5-4B [10]50.049.24.925.915.435.3
PLM-3B67.138.853.145.058.253.0
8B scale
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
", + "bbox": [ + 344, + 478, + 651, + 748 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 11: PLM-VideoBench results across all model scales to supplement results in Table 5.", + "bbox": [ + 189, + 750, + 803, + 763 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E Baseline Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 791, + 478, + 809 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We provide baseline-specific implementation details for all models in §6.1 of the main paper.", + "bbox": [ + 169, + 824, + 779, + 839 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proprietary baselines We evaluate the GPT and Gemini family of models. For GPT-4o, we use the GPT-4o-2024-11-20 checkpoint. We feed 32 uniformly sampled frames regardless of video length, loaded at high image quality setting. For Gemini, we evaluate Gemini-1.5-Pro and Gemini-2.0-Flash. For VQA tasks, we input the video (without audio) which is processed internally at 1 fps. For", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "spatio-temporal tasks (RCap, RDCap, and RTLoc) we use the same inputs as for open-source models and GPT-4o. We evaluate these models using API call.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Open-source models We evaluate InternVL, Qwen, Molmo and Llava-OV models. We follow official implementation and preprocessing pipelines for each. Specifically, we evaluate InternVL2 and InternVL2.5 (code); QwenVL2 and QwenVL2.5 (code); Molmo-O-0924 (code) and Llava-OV (code). For QwenVL, we sample frames at 1 fps from videos. 
For InternVL2, we use 12 tiles per image as this more closely matches the reported results.", + "bbox": [ + 169, + 133, + 826, + 205 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Human performance baseline. In Table 5, we report human performance on PLM-VideoBench. For each task, we present annotators with the test sets and collect answers for each instance given the standard task prompt. Given the difficulty of RDCap, we reuse our data annotation pipeline in $\\S H$ to collect new dense captions independently, rather than providing the standard task instruction.", + "bbox": [ + 169, + 218, + 826, + 277 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F Additional Results", + "text_level": 1, + "bbox": [ + 171, + 292, + 364, + 309 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.1 Comparison with LLaMA-3V", + "text_level": 1, + "bbox": [ + 171, + 324, + 421, + 340 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/828b315a69fb2c08cbb43552a082a1d2df5550d03f5affc2807edeba28365435.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAvg.DocVQA (test) acc [53]CharQA (test) acc [54]TextVQA (test) acc [52]InfoQA (test) acc [56]AL2D (two mask) acc [55]MMMU (val) acc [37]VQAV2 (val) acc [111]
LLaMA 3.2V (11B) [13]73.088.483.479.763.691.150.775.2
LLaMA 3.2V (90B) [13]76.690.185.582.367.292.360.378.1
PLM (1B)67.190.778.682.163.084.934.881.7
PLM (3B)74.493.884.384.374.690.941.284.3
PLM (8B)76.294.686.586.580.992.746.185.6
", + "bbox": [ + 292, + 356, + 697, + 455 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.2 Image Captioning", + "text_level": 1, + "bbox": [ + 171, + 518, + 339, + 535 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/9709cd79ff83abbab0f0a275a7f68a66ecc1857a6d9ed49ff9b58854b7d5d41e.jpg", + "table_caption": [ + "Table 12: PLM versus LLaMA-3V on Image Benchmarks: Note that we use LLaMA-3V-90B [13] for generating image captions in our synthetic data engine." + ], + "table_footnote": [], + "table_body": "
ModelCOCO (karnathy) CIDEr [49]Nocap CIDEr [50]Flickr CIDEr [51]
Proprietary
GPT-4o [33]74.476.671.7
Gemini 1.5 Pro [35]70.671.168.2
Gemini 2.0 Flash [35]84.885.066.6
1B scale
Qwen2VL-2B [30]107.1101.286.0
InternVL2.5-1B [10]122.6110.586.1
PLM-1B138.6124.2100.5
3B scale
Qwen2.5 VL-3B [106]101.7105.577.5
InternVL2.5-4B [10]125.4117.187.4
PLM-3B144.9126.598.0
8B scale
LLaVA-OV-7B [28]112.170.755.7
Qwen2.5VL-7B [106]36.832.734.9
InternVL2.5-8B [10]125.8116.796.5
PLM-8B146.7129.9105.6
", + "bbox": [ + 401, + 550, + 602, + 781 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 13: Image Captioning benchmarks. PLM versus proprietary models and open-access baselines of comparable scale on Image Captioning benchmarks.", + "bbox": [ + 169, + 787, + 826, + 816 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.3 Image Grounding", + "text_level": 1, + "bbox": [ + 171, + 90, + 339, + 107 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/f35c016082e347045eb81156cdaf9761cb71b628a67cb8a1fc750df128649a34.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelRefCOCOvalRefCOCO testARefCOCO testBRefCOCO+ valRefCOCO+ testARefCOCO+ testBRefCOCOg valRefCOCOg testAvg.
Specialists
GroundingDINO [211]90.693.288.288.289.075.986.187.086.6
UNINEXT-H [212]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [213]90.693.288.288.289.075.986.187.086.6
1B scale
PLM-1B88.591.584.883.288.676.586.086.485.7
3B scale
Qwen2.5 VL-3B [106]89.191.784.082.488.074.185.285.785.0
PLM-3B93.394.989.589.893.684.290.890.990.9
8B scale
Cube-LLM [214]90.992.687.983.989.277.486.687.287.0
Qwen2VL-7B [30]91.793.687.385.890.579.587.387.887.9
Qwen2.5VL-7B [106]89.191.784.082.488.074.185.285.785.0
InternVL2-8B [10]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [10]90.394.585.985.291.578.886.787.687.6
PLM-8B90.691.885.987.391.381.188.889.288.2
", + "bbox": [ + 295, + 125, + 699, + 332 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.4 Long Video Understanding", + "text_level": 1, + "bbox": [ + 171, + 397, + 403, + 414 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/f5228af189058077b100ec6c0dc4d98c7c57510f2dd8d1516560b8cd3d8e1e1d.jpg", + "table_caption": [ + "Table 14: Image Grounding results on RefCOCO+/g. PLM performs competitively compared to the baselines across all model scales, and outperforms specialist models for the image grounding task." + ], + "table_footnote": [], + "table_body": "
ModelLong Video QA
LVBench acc [92]Long VideoBench (val) acc [94]MLVU (dev) Marq [96]
Proprietary
GPT-4o [33]37.266.7*67.4
Gemini 1.5 Pro [35]33.1*64.0*69.9
Gemini 2.0 Flash [35]-61.6*69.5
1B scale
Qwen2VL-2B [30]42.047.962.7
InternVL2-1B [10]31.443.3*52.0
InternVL2.5-1B [10]35.347.957.3*
PLM-1B40.052.358.9
3B scale
Qwen2.5 VL-3B [106]43.3*54.2*68.2
InternVL2-4B [10]34.053.0*59.9*
InternVL2.5-4B [10]40.156.368.3*
PLM-3B40.457.965.0
8B scale
LLaVA-OV-7B [28]38.855.764.6
Qwen2VL-7B [30]46.055.869.8*
Qwen2.5VL-7B [106]45.3*56.0*70.2*
InternVL2-8B [10]37.055.464.0*
InternVL2.5-8B [10]43.2*60.0*68.9
PLM-8B44.556.966.4
", + "bbox": [ + 397, + 430, + 604, + 720 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 15: Results on long video understanding tasks. We compare PLM with open-access baselines and proprietary models of comparable scale, and report results over 3 long video QA benchmarks. Cells with * are reported numbers from literature. The remaining are reproduced using official code.", + "bbox": [ + 169, + 726, + 826, + 768 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G PLM-FGQA: Fine-grained QA", + "text_level": 1, + "bbox": [ + 171, + 89, + 473, + 107 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We present PLM-FGQA Fine-grained QA (FGQA), a video dataset focused on \"how\" actions are performed, capturing nuanced fine-grained details through specially designed questions and carefully annotated answers. Due to the scarcity of fine-grained video Q&A data, see Table 16, we built a data engine to enable the collection of our 2.4M Q&A dataset, PLM-FGQA.", + "bbox": [ + 169, + 121, + 823, + 178 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/efa02aa4bae3e91c46df6512d791277416bd6ded15e1faf03355c41f6db2cee2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetYear#Q&AsDatasetYear#Q&As
MovieQA20166462STAR202160000
MSRVTT-QA2017243690CLEVRER202382620
TGIF-QA2017165165EgoQA202419000
MSVD-QA201751000PerceptionTest202444146
TVQA2018152545VideoInstruct202425803
ActivityNetQA201958000MoVQA202421953
How2QA202044007CinePile2024303828
Next-QA202152044Sports-QA202594000
PLM-FGQA20252379067
", + "bbox": [ + 271, + 191, + 722, + 325 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 16: Comparison of our PLM-FGQA dataset with existing video-QA datasets.", + "bbox": [ + 222, + 330, + 771, + 345 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.1 Annotation process: Data Engine", + "text_level": 1, + "bbox": [ + 171, + 383, + 449, + 398 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Our data engine is built upon the following modules: (1) Temporal Segment Generation, (2) Question Generation, (3) Answer Generation, (4) Human Annotation (answer verification/manual answer annotation), (5) Quality Control, as illustrated in Figure 12. Next, we describe each module in detail, and finally also provide additional details about the extra steps we took for forming the FG-QA component of PLM-VideoBench out of these annotations.", + "bbox": [ + 169, + 410, + 823, + 479 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg", + "image_caption": [ + "Figure 12: Data engine used to collect the PLM-FGQA dataset." + ], + "image_footnote": [], + "bbox": [ + 173, + 494, + 823, + 547 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.1.1 Temporal Segment Generation", + "text_level": 1, + "bbox": [ + 171, + 597, + 444, + 612 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We source the video data that serves as a basis for our annotations from publicly available datasets. 
Based on the video sources and the type of existing annotations, we split the videos into three distinct categories.", + "bbox": [ + 169, + 621, + 823, + 662 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Videos with existing ground-truth segment annotations: We directly adopt segments with their human-annotated action annotations from the following datasets: Ego4d Goal-Step[215], Ego4D Moments[115], EgoExo4D [116], HT-Step[216, 217], COIN [117], CrossTask [118], and YouCook2 [83]. All those sources provide video segment boundaries accompanied by some form of textual action descriptions, and are therefore readily usable with the rest of the pipeline.", + "bbox": [ + 169, + 669, + 823, + 739 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Unedited videos of physical activities: For physical activities videos (e.g. basketball, dancing, soccer), actions are usually atomic and short (e.g. dribble, dance move, kick) and therefore rerequire precise temporal localization. To source videos for these scenarios we used data from EgoExo4D [116] that contains temporally well-aligned and precise narrations; we obtained segments of 2-3 seconds centered around narration timings, and used the anchor narrations directly as the action description.", + "bbox": [ + 169, + 744, + 823, + 816 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Raw, untrimmed videos in-the-wild without temporal segment annotations. We source a very large part of our data from untrimmed instructional videos in the large-scale HT100M dataset [114] which we first need to segment before use. The goal is to obtain video clips that contain meaningful, salient actions, and also caption the resulting segments with concise but accurate action descriptions. 
We describe the automatic segmentation and captioning module in the following.", + "bbox": [ + 169, + 821, + 823, + 891 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The automatic segmentation and captioning pipeline involves the following three stages:", + "bbox": [ + 171, + 897, + 750, + 912 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg", + "image_caption": [ + "Figure 13: Distribution of question types (left) and video sources (right) in the FGQA component of PLM-VideoBench." + ], + "image_footnote": [], + "bbox": [ + 174, + 89, + 514, + 237 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 90, + 816, + 236 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Temporal segment proposal. Given untrimmed long videos, the first step is to identify semantically coherent segments within them. Inspired by prior work on unsupervised action proposal and segmentation, we leverage visual feature clustering to generate temporal segment proposals, and use shot-boundary detection results to further refine the segment boundaries. We extract clip-level visual features[218] using a sliding window with temporal stride of 1 second. We then compute the pairwise similarity between neighborhood features and detect the class-agnostic action boundaries using a boundary detection kernel (similar to those used in literature[219, 220]). Finally, since the detected segments are usually over-segmented, we perform a bottom-up agglomerate clustering approach to group adjacent segments into clusters, using a segment duration prior of 10 seconds. 
We also leverage shot boundary detection[221] to obtain precise moments of scene changes: we refine the boundaries of the segment proposals by aligning them to the detected shot boundaries when they're sufficiently close ( $\\leq 1$ second).", + "bbox": [ + 169, + 311, + 826, + 477 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Segment filtering and ranking. How-to videos often include a lot of content that is irrelevant to the demonstration of the activity at hand, such as the instructor explaining what they are about to do or showcasing tools and ingredients. It is therefore important to detect and filter segments with such uninformative content. To that end we rank candidate segments according to relevance using a series of heuristics and learned models, described below.", + "bbox": [ + 169, + 483, + 823, + 553 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "a. Talking head detection. A common mode in instructional videos is instructors talking into the camera, describing objects or explaining actions they're about to take. To detect and remove such segments, we employ an Active Speaker Detection (ASD) pipeline[222], which we run densely on every video and combine resulting talking head tracks, to produce an ASD score for every segment.", + "b. Hand-object interaction (HOI) detection. The presence of hand-object interaction (HOI) can be a good indicator of visually groundable actions. We leverage the temporal selection strategy[223] to filter out the segment proposals that contain hand-object interaction. We first employ an off-the-shelf robust HOI detector[224] to densely extract HOI regions within a proposed segment. The HOI score is then calculated by measuring the likelihood of hand-object interaction in the segment and the averaged probability of all the detected hands.", + "c. ASR groundability. HT100M videos contain timestamped ASR captions, which are speech transcriptions of the audio instructions. 
It is desirable to rank candidate segments based on how likely their ASR content is to their video content. The hypothesis here is that segments containing ASR transcriptions that align well to the video content, are more likely to be visual-information rich. Moreover since the action labeling pipeline (described next) relies on ASR metadata for producing descriptions, higher ASR groundability scores make it likelier to produce good quality segment descriptions. For every candidate segment, we compute an ASR-groundability score by computing video-text alignment scores[218] for each ASR caption within the segment and then averaging the ones that are above a threshold (we use 0.5).", + "d. Relevance classification. The above heuristics work well for the clear-cut cases they are tailored for, but in practice we found that they struggle with more nuanced segments (e.g. instructor fiddling with an object and describing it rather than using it). To improve the detection of those cases, we manually labelled a small amount of segments that passed through the other filters and trained a binary classifier to classify them as \"relevant\" or \"irrelevant\"; to that end we trained a simple 2-layer MLP classifier" + ], + "bbox": [ + 169, + 559, + 823, + 912 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "on top of temporally pooled video representations with a logistic loss for binary classification. We deployed the trained model to provide a relevance score for all the candidate segments.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We combined the scores resulting from all the modules described above and determined cutoff thresholds, based on a small manually annotated validation set. 
In production, we keep all the segments that have relevance scores above those thresholds.", + "bbox": [ + 169, + 126, + 825, + 167 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Segment captioning We follow a two-step process to obtain action labels for each unlabeled segment: In the first step, a collection of off-the-shelf perception models are used to extract individual image-level captions, video-level captions, and object detections from the segment. The output of all perception models is then fed as text into an LLM to generate long, fine-grained captions. At the second step, the detailed captions are fused with the ASR content of the segment, to obtain a consice action description. Specifically, we query an LLM (Llama 3.3 70B [13]) with the following prompt:", + "bbox": [ + 169, + 174, + 826, + 258 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Segment to action labels prompt", + "text_level": 1, + "bbox": [ + 181, + 270, + 413, + 282 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Detailed description: [fine grained caption] ASR transcription: [asr caption]. Given the detailed description above, identify the specific action performed as part of the activity [task name]. Your response must not be the same as the activity [task name] and needs to be a specific substep within the activity [task name]. 
Please also supply a rationale for your answer.", + "bbox": [ + 179, + 286, + 815, + 349 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The extracted labeled video segments obtained through the above process serve as the foundation for the subsequent Q&A generation.", + "bbox": [ + 169, + 362, + 823, + 390 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "G.1.2 Automatic Question Generation", + "text_level": 1, + "bbox": [ + 171, + 407, + 452, + 422 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We automatically generate questions about the fine-grained details of the way activities are executed in the video. Our questions is generated with a variety of prompts and models which lead to increased question diversity and specificity. In Table 17 we present the question types and sample questions per question type. Here, we summarize how these questions are generated automatically with an ensemble with models and prompts:", + "bbox": [ + 169, + 433, + 823, + 503 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "LLM-based action-conditioned question generation Given a segment, its action name (e.g., cut potatoes), a task name (e.g., How to make sweet potato gratin) and optionally other metadata about the segment (for example, recognized objects [?]), we generate questions that can elicit descriptions of fine-grained details by raters with an LLM. We use tailored prompts for generating questions that cover how the activity is executed (tools, object locations, object states, direction of movements, hand pose), and the spatial arrangement of objects.", + "bbox": [ + 169, + 508, + 825, + 592 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Activity FG question generation prompt", + "text_level": 1, + "bbox": [ + 179, + 604, + 462, + 618 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "I am learning how to [action name] while [task name]. 
Ask me [N] most relevant questions that reveal the details of the way the step is executed in my environment, e.g., (a) part location, (b) types of tools/ingredients used, (c) direction of movements, (d) how are objects held, (e) object states at the beginning of the step, (f) object state at the end of the step. The questions must be answerable by visually observing the activity, without reading instructions or trying out. Please indicate the type of question from (a) to (f) for each question asked at the beginning of the question.", + "bbox": [ + 179, + 621, + 815, + 710 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Spatial FG question generation prompt", + "text_level": 1, + "bbox": [ + 181, + 724, + 455, + 738 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Imagine I have no common sense or understanding of the 3D real world. I am trying to [task name] and am at the step where I am [action name]. There's [object list] when I'm [action name]. Ask me [N] questions about the 3D position of objects, relative location between objects, distance between objects, spatial relationship using prepositions like above, below, next to, etc. that I might want to know. The questions must be answerable by only visually observing me performing activity, without reading instructions or trying out.", + "bbox": [ + 179, + 741, + 815, + 830 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We explicitly encourage the LLM to provide questions that can be answered solely based on the video frames, in contrast to questions that are focused on external knowledge or non-groundable concepts or judging the execution of the step (e.g., avoid questions like is the pan hot enough to add the oil?), what tool is typically used to loosen the axle nut). 
The rationale for this is to collect as many Q&A pairs that a model cannot answer just based on external knowledge/language prior, but they rather", + "bbox": [ + 169, + 842, + 826, + 912 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "require vision perception to be answered. Note that these questions are generated without visual input, hence they are not instance-specific and might not be answerable given the video segment.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "VLM-based instance-specific question generation After collecting a first set of Q&As using the LLM-generated questions, we bootstrap a VLM Question Generator model, which takes as input the video segment, question types and optionally the task name, and generates a set of instance-specific visual questions. The VLM Question Generator model is obtained by supervised fine-tuning of PLM with a question generation instruction-tuning dataset which consists of triplets (video, prompt, response), where the prompt includes the instruction to generate questions based on question types and the response includes example questions to be generated for the given video. Due to the lack of such a dataset with fine-grained question, we synthetically generated it by utilizing the Q&A pairs obtained based on the LLM-generated questions. Specifically, for each video segment, we use an LLM to (1) decompose existing Q&A pairs into multiple Q&A pairs, with each new question focusing on one detail of the original answer; (2) tag question types for the generated questions based on an expanded list of question types; and (3) generate a (prompt, response) pair for the segment. 
This resulted in $\\sim 600k$ training instances.", + "bbox": [ + 169, + 126, + 826, + 305 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "VLM Question Generator training sample", + "text_level": 1, + "bbox": [ + 179, + 316, + 464, + 329 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Generate 3 different questions that reveal the fine-grained details of the way the activity is executed. In particular, focus on these question types: fine-grained object locations, hand pose, object/repetition counts, generating at least one question per type. Write each question in a separate line, e.g., Q1. first question.", + "bbox": [ + 179, + 332, + 815, + 383 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Q2. second question.", + "bbox": [ + 181, + 385, + 331, + 396 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "ON. N-th question.", + "bbox": [ + 181, + 405, + 313, + 420 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 181, + 421, + 250, + 433 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Q1. Where are the tomatoes positioned prior to being cut?", + "Q2. How is the person grasping the tomato with their left hand?", + "Q3. How many tomatoes did the person use in the segment?" 
+ ], + "bbox": [ + 181, + 434, + 648, + 470 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "LLM-based follow-up question generation This final set of questions aims to increase coverage of video details and generate highly fine-grained questions by leveraging the already collected Q&A pairs for each segment and feed them to an LLM that generates \"follow-up\" questions that are more detailed and challenging than the initial questions.", + "bbox": [ + 169, + 482, + 823, + 539 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Follow-up question generation prompt", + "text_level": 1, + "bbox": [ + 179, + 549, + 449, + 561 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "I have the following information gathered about the video: [list of previous Q&A samples] Utilizing information and details from all the provided Q&A pairs (make sure to specialize questions based on the already corrected answers, e.g., using referring expressions), ask [N] most relevant and interesting, visual questions that we can ask annotators in order to reveal NEW, rich, additional fine-grained details about the video that we don't know yet, in particular about the following question types: 'tools/ingredients', 'object counts', 'repetition counts', 'direction of movement', 'hand pose', 'fine-grained object locations', 'spatial relations', 'initial state/end state', 'action happened before/after', 'clothes wearing', 'body pose', 'main action in the video', 'temporal extent of action', 'sizes'. The questions should be specific and have a specific answer. Avoid generic questions that can be very tedious to answer, e.g., how many objects are there in the scene. Also, do not generate questions that start with \"Is ...\" and then list options. Prefer open-ended questions, e.g., starting with \"How\". [... 
More examples & formatting ...]", + "bbox": [ + 178, + 565, + 818, + 742 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "G.1.3 Automatic Answer Generation", + "text_level": 1, + "bbox": [ + 171, + 762, + 442, + 776 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The next step of the data engine aims to produce correct and comprehensive answers to the generated questions. We obtain automatic answers to the generated questions using a version of PLM that has been fine-tuned with extra privileged information of various forms as input. The privileged information includes textual annotations from the metadata available with the candidate training videos and feature embeddings extracted from off-the-shelf models. Useful textual metadata include the video title, ASR captions or written descriptions, video-level task name (inferred by an LLM using the title and captions), and any existing QAs for that video. Off-the-shelf embeddings include frame-level features extracted denseley at 1 fps; we use an open-vocabulary object detection model, OWLv2 [225], for embedding object detection information and CLIP ViT-L14 embeddings [226]", + "bbox": [ + 169, + 787, + 826, + 912 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/48e62c91a4f9cb332ecf9e55e6ff53d1d47295a99bc55f91839966cf6ebf9686.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Question TypeSample Questions
Action RecognitionWhat is the process being performed on the sandpaper? \nWhat is the action shown?
Action SequenceWhat does the person do after brewing the tea? \nWhat does the person do before marking the vinyl with a pencil?
Counting ProblemsWhat is the quantity of universal down cleaner being poured into the task area? \nHow many branches does the person cut in total? \nHow many times does the person spray Greased Lightning onto the ketchup spill?
Movement DirectionIn what direction is the black welding tool pointing while the person is working on the metal joint? \nHow does the person chop the garlic with the knife?
Object AttributesWhat is the color of the seatpost shown in the video segment? \nWhat is the shape of the tube at the end of the step? \nWhat is the size of the knife being used to chop the spring onions?
Object LocationWhere does the person put the honey bottle away? \nWhere does the person position the clothes before ironing?
Object RecognitionWhat type of roller and paint are being used? \nWhat does the person place on top of the smooth half of the egg carton? \nWhat was the person initially holding in their left hand?
Object StateHow would you describe the sink at the beginning of the cleaning process? \nWhat is the state of the nematode after mixing it with water and sponge?
OtherAt what point in the video is the person seen holding the wires?
PoseHow are the woman's legs positioned while she is sitting? \nHow bent is the left elbow during the activity?
Spatial RelationsHow far is the bias tape maker from the right edge of the ironing board? \nWhat is the spatial relationship between the bowls and the Brussels sprouts on the kitchen countertop?
Speed/ForceHow would you describe the consistency of pressure applied during sanding? \nHow fast does the person initially push the stone?
", + "bbox": [ + 184, + 87, + 812, + 549 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 17: PLM-FGQA question types and sample questions", + "bbox": [ + 297, + 555, + 697, + 570 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "for scene classification information. We incorporate the textual annotations directly into language prompts using the following template:", + "bbox": [ + 169, + 598, + 823, + 627 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Automatic answer generation prompt", + "text_level": 1, + "bbox": [ + 179, + 636, + 433, + 648 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A video is showing a task [video level task name], specifically the part where [ASR caption]. Here is what we already know about the video: [existing question-answer pairs]. Answer this question in detail: [question]", + "bbox": [ + 178, + 652, + 816, + 691 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The off-the-shelf embeddings are incorporated into the PLM input via an additional Perceiver-IO[227] tokenizer, which summarizes the embeddings at the segment level.", + "bbox": [ + 169, + 699, + 823, + 729 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We fine-tune the answer generator on 1M manually annotated QA pairs. After fine-tuning, we deploy the trained answer generator with privileged information access on the unlabelled questions produced in the previous step, to produce automatic answers.", + "bbox": [ + 169, + 734, + 826, + 777 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "G.1.4 Human Annotation", + "text_level": 1, + "bbox": [ + 171, + 791, + 366, + 805 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "After obtaining segments and generating questions and automatic answers, we employ human annotators to obtain high-quality answers. 
Our answer annotations include the following:", + "bbox": [ + 169, + 816, + 823, + 844 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Human-verified answers: Raters are provided with the model-generated answer and are asked to accept or reject the answer. They can reject questions for being irrelevant or unanswerable, and answers for being factually incorrect or lacking details. Accepted question-answer pairs proceed without changes, while rejected ones are handled differently:", + "bbox": [ + 215, + 854, + 826, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "question-related rejections (irrelevant or unanswerable) are discarded, whereas answer-related rejections (factually incorrect or lacking details) are marked for correction in the next phase. $17.8\\%$ of the total training samples are human-verified automatic answers.", + "bbox": [ + 227, + 90, + 823, + 133 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Human annotated answers: Raters answer the questions from scratch by ensuring to cover all the relevant details within the temporal segment. They receive reference information, such as video-level task names and ASR captions, and may use online resources like WikiHow for additional context. Questions that cannot be answered based on the video segment (for example, due to some false premise) are rejected (with an explanation). These manually annotated answers make up $82.2\\%$ of the PLM-FGQA training split, and $100\\%$ of the evaluation set.", + "bbox": [ + 215, + 140, + 823, + 237 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Quality Control. Data quality is crucial for model success. 
We followed several strategies to monitor and enhance annotation quality: annotation Certification - we reviewed a small sample of annotations from each rater before they could work in production queues, ensuring that annotators met high-quality standards before advancing to production; golden Examples - annotators were provided with high-quality annotation examples, highlighting common error patterns and offering acceptable answers. targeted and Dual QA - we conducted daily audits, including vendor auditing and our own sampled quality control. In total, $13\\%$ of the training set was audited, and $100\\%$ of the samples in PLM-VideoBench underwent quality control.", + "bbox": [ + 169, + 250, + 826, + 362 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "G.2 FGQA PLM-VideoBench Construction", + "text_level": 1, + "bbox": [ + 169, + 378, + 491, + 395 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/843cf4224cafa9d4d49787a9e773e2c299b0ce83ddfd981d9e5567f64e76f276.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TrainTest
Sources stats
Total Videos767k3.6k
Unique Source Videos251k1.9
Average Duration (sec.)9.812.3
Annotations stats
Number of QA Pairs2.4M4.2k
Number Question Types1212
Question Length (avg/max)12/11412.3/56
Answer Length (avg/max)13.3/91114.1/62
Annotation TypeHumanHuman
Open-DomainYesYes
", + "bbox": [ + 341, + 415, + 651, + 583 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 18: Statistics of the PLM-FGQA training and test data. The test split refers to the FGQA module of PLM-VideoBench.", + "bbox": [ + 169, + 585, + 823, + 613 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The FG-QA component of PLM-VideoBench is formed from a held-out portion of PLM-FGQA. We refine this set and transform it into a challenging MCQ-based benchmark by (1) generating MCQs, (2) filtering out samples that can be answered by text-only (blind) LLMs, (3) performing human verification of negatives, and (4) balancing the distribution of question types and domains. The statistics of the dataset are summarized in Table 18. In more detail the steps we followed are:", + "bbox": [ + 169, + 641, + 823, + 712 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "MCQ Generation: To transform QAs into challenging MCQs for evaluation, instead of generating random incorrect answers, we prompt LLMs to produce hard negatives that are semantically close to the correct answer. We use the following prompt which was designed to generate distractors that differ from the correct answer by only a single detail. In effect this enables evaluation to assess fine-grained reasoning about object attributes and tool distinctions.", + "bbox": [ + 169, + 717, + 823, + 787 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Filtering Text-Only Answers: To ensure that video-based reasoning is required, we test whether a text-only LLM can answer the question correctly without seeing the video. If a question can be answered correctly from text alone, we remove or modify it to emphasize visual and temporal grounding.", + "bbox": [ + 169, + 792, + 823, + 849 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Human Verification of Negatives: Automatically generated negatives may sometimes be factually true despite being labeled as incorrect. 
To address this, we perform human verification, where annotators review distractors to confirm that they are both plausible yet definitively incorrect given the video context.MCQs with ambiguous distractors are removed.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Balancing Question Types: Finally, after the above postprocessing and filtering is done, we rebalance the test set, to make sure that the question type and domain distributions are approximately uniform, by undersampling over-represented question types and domains.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Note on the evaluation metric. We report the multi-binary accuracy (MBAcc) [99] to evaluate on the FG-QA task. This accuracy is calculated by comparing the correct answer to each distractor individually. Specifically, for each question, we generate a series of binary questions, where the correct answer is compared with one distractor at a time. A prediction is considered correct only if the correct answer is consistently selected across all binary comparisons. 
We preferred this metric to vanilla MCQ accuracy as it greatly reduces the predictability of automatically-generated MCQs.", + "bbox": [ + 169, + 138, + 823, + 223 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "MCQ generation prompt", + "text_level": 1, + "bbox": [ + 179, + 238, + 339, + 251 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Here is a question and answer pair about a video:", + "bbox": [ + 181, + 255, + 542, + 266 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Q: [question]", + "bbox": [ + 181, + 268, + 279, + 279 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "A: [answer]", + "bbox": [ + 181, + 281, + 264, + 292 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "You need to transform this into a high-quality multiple-choice question. To do this, first rephrase the given correct answer and then provide n distractor answers. The n incorrect answers should be reasonable and valid responses to the question, but should have a different meaning than the correct answer. You generate an incorrect answer from the correct one by changing a single detail, e.g. an object or verb/action that is relevant to what's being asked. Make the incorrect answers realistic, plausible and similar enough to the correct answer so that it is very difficult for someone to distinguish between them with prior knowledge alone. Finding the correct answer should also require visual information about the scene. The distractor answers should answer the question, but should be incorrect but in a non-obvious way. When changing a single detail to create the distractors, make sure that this detail is the main point of the question. 
For example, if the question is about the color of an object, then the distractor should change the color of the object and not the kind of object.", + "bbox": [ + 181, + 292, + 815, + 455 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Here are some examples of good distractors (desired) and bad distractors (to be avoided):", + "bbox": [ + 181, + 455, + 815, + 481 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Q: What is the person wearing on their hands while applying varnish?", + "bbox": [ + 181, + 482, + 683, + 494 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "A: The person is wearing white gloves on their hands while applying varnish with a brush.", + "bbox": [ + 181, + 494, + 813, + 518 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Good distractors:", + "bbox": [ + 181, + 518, + 307, + 530 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- The person is wearing black gloves on their hands while applying varnish with a brush. Bad distractors:", + "bbox": [ + 181, + 531, + 810, + 555 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- The person is wearing black gloves on their hands while applying paint with a roller. .. More examples & formatting ...", + "bbox": [ + 181, + 556, + 810, + 580 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "H PLM-STC Details", + "text_level": 1, + "bbox": [ + 171, + 609, + 366, + 626 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We present PLM Spatio-Temporal Captions (PLM-STC), a novel dataset aimed at training and evaluating VLMs for spatial-temporal reasoning. We collected pairs of mask tablets for objects in videos, along with their corresponding detailed temporal descriptions. The annotations are collected on top of the SA-V [124] videos, which are diverse and high-quality. We excluded the test set videos from SA-V, to avoid any data cross contamination. 
Table 20 provides statistics about the dataset, such as number of total samples, training/val/test splits, object types, and time-segment duration. PLM-STC, is not only novel, but also larger and higher quality compared to existing datasets, see Table 19. In Fig. 5 (right), we show an example of our spatio-temporal captions, describing a little girl (highlighted in blue): (frame 0-81): A little girl moves back as beluga whale approaches her face. (frame 82-85): Out of frame. (frame 86-98): She tries to feed the whale.", + "bbox": [ + 169, + 645, + 823, + 785 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We describe the overall annotation process in Appendix H.1, and how we build the three sub-tasks in Appendix H.2.", + "bbox": [ + 169, + 790, + 823, + 819 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "H.1 Annotation Process", + "text_level": 1, + "bbox": [ + 171, + 840, + 354, + 854 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The annotation process is summarized in Figure 14. The annotation process involves three stages: Object Selection and Tracking, Temporal Segmentation and Captioning and Verification and Quality Control.", + "bbox": [ + 169, + 869, + 823, + 910 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/dce981b90649f3333bcbeecafe451dd31d77240bd5b444c91b0d0b02ac2b5874.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSpatial TypeYear#VideosRegionsTemp. Seg.Captions?
DAVIS16-RVOS [228]Segmentation20185050-No
DAVIS17-RVOS [229]Segmentation201890205-No
YouCook2-BB [83]BBox2018647-4.3KNo
A2D Sentence [230]Segmentation20183.7K4.8K-No
J-HMDB Sentence [231]Segmentation2018928928-No
ActivityNet Entities [232]BBox201914.3K1.5M52KNo
VidSTG [9]BBox20206.9K44.8K-No
Refer-Youtube-VOS [233]Segmentation20203.9K7.5K-No
HC-STVG [234]BBox202116K16K-No
VLN [123]Mouse Trace202350K43.1K43.1KYes
MeVis [235]Segmentation20232K8.8K-No
PLM-STCSegmentation202545.7K122.3K194.2KYes
", + "bbox": [ + 241, + 88, + 756, + 234 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Table 19: Spatio-Temporal-Captioning datasets comparison.", + "bbox": [ + 287, + 239, + 709, + 256 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg", + "image_caption": [ + "Figure 14: PLM-STC Annotation pipeline." + ], + "image_footnote": [], + "bbox": [ + 173, + 277, + 823, + 333 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "H.1.1 Object Selection and Tracking", + "text_level": 1, + "bbox": [ + 171, + 390, + 441, + 406 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Annotators select interesting objects with significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. We instructed the annotators by defining interesting regions in video footage as those with the presence of significant, dynamic actions performed by subjects, which can be human, animal, or object. These regions involve multiple major actions that evolve over time, rather than static or insignificant actions. We provided annotators with examples of interesting regions, such as one featuring a person making a sandwich, a dog chasing a cat, or a kite getting stuck in a tree. The goal for the annotator is to identify regions with high delta, where the subject performs a sequence of significant activities that change over time, such as a person entering a room, sitting down, and then drinking from a glass. By focusing on these dynamic and evolving actions, annotators can effectively select regions worthy of captioning. 
Finally, annotators are provided with several examples of good and bad annotations.", + "bbox": [ + 169, + 417, + 826, + 583 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "H.1.2 Temporal Segmentation and Captioning", + "text_level": 1, + "bbox": [ + 171, + 609, + 509, + 625 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Based on the selected mask tablets, another set of annotators provides time segments for each action and fills in the caption within each time segment. The annotators are instructed to focus on capturing major actions, avoiding minor details or unnecessary movements. When writing captions for each segment, they must ensure clarity in describing the subject's movements and directionality. Additionally, the annotators are advised to avoid making assumptions about the subject's actions or adding details not clearly visible, sticking only to what is directly observable in the frame. As in the previous task, the annotators are provided with several examples of good and bad annotations to guide their work.", + "bbox": [ + 169, + 637, + 826, + 748 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "H.1.3 Verification and Quality Control", + "text_level": 1, + "bbox": [ + 171, + 772, + 455, + 787 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "A final set of annotators manually verifies the tablets and time-segment captions to ensure accuracy and consistency. For mask refinement, we re-run the same pipeline as §H.1.1, while not letting the annotators choose the interesting object, but only refine the quality of the mask. 
For captioning refinement, the annotators are tasked with three objectives: 1) Redundancy: eliminate any repeating or redundant information to ensure the caption is concise; 2) Accuracy: verify that every word in the caption accurately describes a fact present in the video, correcting or removing any incorrect information; and 3) Actions: add missing major action information to the caption while preserving existing atomic actions, ensuring the caption effectively conveys the key events in the video.", + "bbox": [ + 169, + 800, + 823, + 912 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/4d825ae476a074dbec45eef2c486a29543783462afb9b5ea512cff79f913689b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
AllTrainValTest
Dataset stats
Number of Videos45.2K42.0K8042.3K
Spatio Temporal Caption127.8K---
Temporal Caption198.7K---
Tube's categories
Person104.5K99.6K8612.4K
Animal16.8K13.2K5501.7K
Object/things6.4K4.4K4361.2K
Temporal captions per Tube
1 caption per tube78.9K73.9K8422.4K
2 caption per tube30.9K27.8K5661.7K
3 or more Caption per tube16.38K14.15K4211.2K
Tasks stats
Region Detailed Captioning (RDCap)122.3K117.2K2.5K2.6K
Region Captioning (RCap)194.2K179.5K4.6K10.1K
Region Temporal Localization (RTLoc)192.0K179.5K4.6K7.9K
", + "bbox": [ + 287, + 88, + 705, + 304 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Table 20: PLM-STC dataset statistics. Note the for RTLoc, we filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap.", + "bbox": [ + 169, + 306, + 823, + 349 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "H.2 PLM-STC Benchmark", + "text_level": 1, + "bbox": [ + 171, + 387, + 379, + 402 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We utilize the collected data to train and evaluate the PLM on three challenging tasks that are essential for video perception. Firstly, we created a balanced validation and test split by the combination of tube categories and number of caption per tube while making sure no video overlaps with the training set. This is done to make sure we evaluate all the categories presents in the dataset equally. Then, we process the data for each task:", + "bbox": [ + 169, + 414, + 823, + 484 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Dense Video Region Captioning (RDCap). This comprehensive task combines both \"what\" and \"when\" aspects. The model takes the video and the tubelets as input and outputs the full time-segment captions. We also assign an out of frame caption to temporal segments for which the subject does not appear in the video to ensure dense temporal coverage of events across the video duration.", + "bbox": [ + 169, + 489, + 823, + 546 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Video Region Captioning (RCap). This task involves describing \"what\" activities are performed within a specific time frame by the objects in the tubelets. The model receives the video, the tubelets, and the temporal region as input and outputs the corresponding captions. 
We filter out events that refer to the subject when it is out-of-frame to avoid evaluating trivial captions.", + "bbox": [ + 169, + 551, + 826, + 608 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Region Temporal Localization (RTLoc). This task requires the model to localize \"when\" specific events occur in relation to a given tubelet. The input includes the video, the tubelet, and the caption, while the output is the start and end frames indicating when the captioned event occurs. Like RCap, we filter out out-of-frame events, as well as ambiguous events that may be localized to multiple time segments. For example, if the subject opens the door twice, the event text is guaranteed to be unique (e.g., referring to the first and second time they opened the door) or dropped entirely if ambiguous (e.g., if the text only mentions the action).", + "bbox": [ + 169, + 614, + 826, + 712 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "These tasks are designed to both improve and evaluate the model's capabilities, with the same input-output format applied during both training and evaluation. Figure 6 illustrate an examples of the task, including the prompt used to train and evaluate the PLM.", + "bbox": [ + 169, + 717, + 826, + 760 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "I Smart Glasses Data", + "text_level": 1, + "bbox": [ + 171, + 782, + 369, + 797 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "I.1 Data collection and annotation", + "text_level": 1, + "bbox": [ + 171, + 815, + 424, + 829 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We collected the source videos for PLM-SGQA using commercial smart glasses, which enable participants to capture egocentric videos in a hands-free manner. 
Participants are presented with 14 categories of popular scenarios, such as shopping, cooking, and walking in a neighborhood, and are instructed to ask questions about their surroundings as if interacting with a multi-modal assistant that shares their visual perspective. Specifically, participants are asked to ask questions spontaneously,", + "bbox": [ + 169, + 842, + 826, + 912 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "without delay, about the things they see and experience, and to focus on visual queries rather than dynamic information that may change regularly. After recording the videos, participants annotate the segments by marking the start and end points of the video relevant to each question, as well as providing the ground-truth answer.", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "I.2 SGQA Benchmark", + "text_level": 1, + "bbox": [ + 171, + 162, + 344, + 178 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To create the SGQA component of PLM-VideoBench we first filtered the Q&As using an LLM to obtain a shortlist of questions that focus on human activity and also are perception-based rather than based on general knowledge. This means that SGQA focus on questions that require good visual understanding of the scene to be accurately answered. This process yields an evaluation set consisting of 655 Q&As. For the resulting Q&As, we then trimmed the original videos to obtain clips within the temporal boundary that the human wearer/annotator specified. As the annotated segments end at the point where the smart-glass wearer asks the question, it is important for all evaluations to specify that the question refers to the end of the video clip - e.g. see the prompt we used for PLM and baselines evaluation in 10. 
We summarize the statistics of the SGQA test set in Figures 15 and 16.", + "bbox": [ + 169, + 188, + 826, + 313 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/9a4eb11215c651515e62e6429fc7934bc585298496e5b030e5de5e0d8c25b3b3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Sources stats
Total Videos663
Average Duration (sec.)29.4
Annotations stats
Number of QA Pairs665
Number Domains14
Question Length (avg/max)9.0 / 52
Answer Length (avg/max)21.6 / 40
Annotation TypeHuman
Open-DomainYes
", + "bbox": [ + 207, + 347, + 415, + 460 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 15: Statistics of the PLMSGQA test data.", + "bbox": [ + 205, + 467, + 418, + 494 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg", + "image_caption": [ + "Figure 16: Domain distribution of video-clips in PLMSGQA." + ], + "image_footnote": [], + "bbox": [ + 423, + 333, + 785, + 476 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "J Synthetic Data Engine", + "text_level": 1, + "bbox": [ + 171, + 520, + 393, + 537 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Our data engine targets base capabilities of VLMs: image captioning, visual question answering, OCR, chart/diagram understanding, and video understanding. We developed different pipelines for images and videos, and includes different levels of metadata to generate captions and QAs.", + "bbox": [ + 169, + 551, + 823, + 594 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Image Captions: We caption high-quality images using Llama 3.1V 90B. An example is shown in Figure 17. We use this pipeline to caption SA1B [105], Object365 [135], and OpenImages [136].", + "bbox": [ + 169, + 599, + 823, + 628 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "OCR QAs: We leverage pre-extracted OCR and use it as input for a LLM (i.e., Llama 3.3 70B) to generate a set of five question-answer pairs. An example is shown in Figure 18. We use this pipeline to generate QAs for PDFAcc [132], and UCSF [133].", + "bbox": [ + 169, + 633, + 823, + 676 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Image Captioning plus QAs: In cases for which OCR does not provide enough information to create questions (e.g., scientific figures), we further caption the image using Llama 3.1V 90B. 
Then we pass the caption with auxiliary metadata (e.g., OCR) to a LLM (i.e., Llama 3.3 70B) to generate question-answers pairs. An example is shown in Figure 19). We use this pipeline to generate captions and QAs for ArxivQA [59], DocVQA [53], InfoVQA [56] and Ai2d [55].", + "bbox": [ + 169, + 681, + 823, + 753 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Video Captioning plus QAs: An image captioner is run on key-frames of the video, as well as a video captioner on the overall video at 1 fps. The result captions are passed to a LLM (i.e., Llama 3.3 70B, or Llama 3 405B) with additional metadata (e.g., video title etc.), so to generate a detailed caption and a multiple-chosen question answers pair. An example is shown in Figure 20). We use this pipeline to generate captions and QAs for YT-1B [236], Ego4d [115] $^{3}$ , DiDeMo [140], Charades [139], and Kinetics710 [121] $^{4}$ .", + "bbox": [ + 169, + 758, + 826, + 842 + ], + "page_idx": 30 + }, + { + "type": "page_footnote", + "text": "3For this dataset we used Llama3 405B, rather than Llama 3.3 70B", + "bbox": [ + 192, + 869, + 589, + 883 + ], + "page_idx": 30 + }, + { + "type": "page_footnote", + "text": "4DiDeMo, Charades, Kinetics710 used a simpler pipeline where only frame captions were used, and a smaller scale LLM (i.e., Llama 3.1 8B)", + "bbox": [ + 169, + 883, + 823, + 910 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Detailed Caption", + "text_level": 1, + "bbox": [ + 565, + 104, + 686, + 118 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 145, + 403, + 279 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The image depicts a yellow building with a red door and a sign that reads \"YOLCU SALONU\" 
in black letters. The building has a gable roof and a small window on the left side. There are two trees in front of the building, one on each side of the entrance. The ground is wet, and there is snow on the trees and the ground. In the background, there is a train station with a train on the tracks. The sky is gray and overcast. The image appears to be a photograph taken on a cold winter day. The presence of snow and the gray sky suggest that it is a winter scene. The building and the train station in the background suggest that it is a public place, possibly a train station or a bus stop. The sign on the building reads \"YOLCU SALONU,\" which is Turkish for \"Passenger Lounge.\" This suggests that the building is a waiting area for passengers. Overall, the image depicts a quiet and peaceful winter scene, with a focus on the building and the surrounding environment.", + "bbox": [ + 416, + 123, + 844, + 299 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg", + "image_caption": [ + "Figure 17: Detailed caption example, and the corresponding pipeline. The captioner (i.e., Llama 3V 90B) is prompted to generate the caption for the provided image." 
+ ], + "image_footnote": [], + "bbox": [ + 316, + 306, + 700, + 340 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "OCR", + "text_level": 1, + "bbox": [ + 655, + 416, + 694, + 429 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 435, + 844, + 599 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Question Answers", + "text_level": 1, + "bbox": [ + 442, + 604, + 573, + 617 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Q: How many teams qualify for the finals?", + "A: The top 3 teams from each section, plus the next two highest teams on points, qualify for the finals, making a total of 5 teams.", + "Q: How many points has the Buderim team earned?", + "A: The Buderim team has earned 150 points.", + "Q: Which team is currently in the lead?", + "A: The Albert team is currently in the lead with 36 points and a margin of 39.", + "Q: What is the current margin of the Mooloolaba team?", + "A: The Mooloolaba team has a current margin of 1.", + "Q: What is the overall summary of the document?", + "A: The document appears to be a standings table for the Ladies Division Three of a bowling or lawn bowls competition, showing the current standings of various teams, including their win-loss records, points, and margins." + ], + "bbox": [ + 181, + 623, + 833, + 787 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Pipeline", + "text_level": 1, + "bbox": [ + 478, + 801, + 537, + 815 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg", + "image_caption": [ + "Figure 18: Visual Question Answering pairs and the corresponding pipeline. The OCR text is extracted from the image, and passed to the LLM (i.e., Llama 3.3 70B) to generate QA pairs." 
+ ], + "image_footnote": [], + "bbox": [ + 313, + 821, + 702, + 857 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "OCR", + "text_level": 1, + "bbox": [ + 655, + 210, + 694, + 223 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 229, + 436, + 362 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "SB 1011 10 Fraction of objects 0.2 0.1 0.0 0.01 0.1 1.0 10.0 erg s1", + "bbox": [ + 514, + 281, + 843, + 309 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Detailed Caption", + "text_level": 1, + "bbox": [ + 446, + 364, + 568, + 378 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The image depicts a histogram of the distribution of objects, with the x-axis labeled \"LSB[1046ergs-1]\" and the y-axis labeled \"Fraction of objects.\" The x-axis ranges from 0.01 to 10.0, while the y-axis ranges from 0.0 to 0.2. The histogram is divided into bins of varying widths, with the first bin spanning from 0.01 to 0.1, the second bin spanning from 0.1 to 1.0, and so on. Each bin contains a bar representing the fraction of objects within that range. The bars are shaded in gray, with some bins having multiple bars. A key feature of the histogram is the presence of a peak in the middle bin, which corresponds to an $L_{SB}$ value of around 1.0. This suggests that the majority of objects have an $L_{SB}$ value close to this value. The histogram also shows a tail extending towards higher $L_{SB}$ values, indicating that some objects have significantly higher $L_{SB}$ values than the majority. Overall, the histogram provides a visual representation of the distribution of $L_{SB}$ values among the objects being studied. 
It allows for easy identification of patterns and trends in the data, such as the peak in the middle bin and the tail towards higher $L_{SB}$ values.", + "bbox": [ + 179, + 385, + 836, + 525 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Multi-Choice Question Answer (MCQA)", + "text_level": 1, + "bbox": [ + 364, + 529, + 651, + 544 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "What is the approximate $L_{SB}$ value at which the majority of objects have their peak?", + "bbox": [ + 181, + 547, + 686, + 561 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Options:", + "(A) 0.1", + "(B) 1.0", + "(C) 5.0", + "(D) 10.0", + "Answer: (B) 1.0." + ], + "bbox": [ + 183, + 561, + 284, + 636 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Pipeline", + "text_level": 1, + "bbox": [ + 477, + 641, + 537, + 655 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg", + "image_caption": [ + "Figure 19: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. The OCR text is extracted from the image, and the caption is generated by the captioner (i.e., Llama 3V 90B), which are all passed to the LLM (i.e., Llama 3.3 70B) to generate MCQAs." 
+ ], + "image_footnote": [], + "bbox": [ + 215, + 660, + 802, + 736 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 116, + 834, + 188 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Metadata", + "text_level": 1, + "bbox": [ + 472, + 191, + 544, + 205 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Title: Lions VS Colts Highlights 2017 Preseason Game", + "bbox": [ + 181, + 210, + 514, + 224 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Description: Comment suggestions for future videos and Enjoy!", + "bbox": [ + 181, + 224, + 566, + 238 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Frame Caption", + "text_level": 1, + "bbox": [ + 452, + 241, + 563, + 256 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Frame 435: The image shows a man with dreadlocks standing in front of a crowd of people in a stadium. He is wearing a white t-shirt and is surrounded by a group of people standing on the ground. On the left side of the image, there is a table fan, bottles, and other objects placed on a table. In the background, there are people sitting on chairs, stairs, railings, boards with text, lights, and the sky. The text on the boards reads \"Indianapolis Colts vs San Francisco 49ers", + "bbox": [ + 181, + 260, + 834, + 323 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Frame 585: The image shows a football game being played on a TV screen, with a group of people on the ground and a few people standing in the background. 
At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions.", + "bbox": [ + 181, + 323, + 833, + 361 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Frame 765: The image shows a group of people playing a game of football on a green field, with white lines marking the boundaries of the field. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions.", + "bbox": [ + 181, + 361, + 833, + 398 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Frame 945: The image shows a football game being played on a TV screen, with people wearing helmets and playing on the ground. At the bottom of the image, there is text and numbers indicating that the game is between the Detroit Lions and the Indianapolis Colts.", + "bbox": [ + 181, + 398, + 833, + 436 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Video Caption", + "text_level": 1, + "bbox": [ + 455, + 441, + 560, + 455 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Football players wearing helmets, Detroit Lions vs Indianapolis Colts, player running with ball, falls down, touchdown scored.", + "bbox": [ + 181, + 460, + 834, + 484 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Detailed Caption", + "text_level": 1, + "bbox": [ + 446, + 488, + 568, + 503 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "A football player is running with the ball and then falls down, the game is between the Detroit Lions and the Indianapolis Colts, with players wearing blue and white uniforms and helmets, and people sitting in the stadium, a watermark on the image shows the teams playing, one player is laying on the ground next to another player wearing a white and blue jersey and a white helmet, a horseshoe-shaped object is on the grass, text on the image indicates a touchdown, players are wearing helmets and playing on the 
ground, with watermarks in the bottom left corner of the images, the game involves the Detroit Lions and the Indianapolis Colts, and also shows the Colts playing against the New England Patriots and the Lions playing against the Dallas Cowboys.", + "bbox": [ + 181, + 507, + 834, + 595 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Multi-Choice Question Answer (MCQA)", + "text_level": 1, + "bbox": [ + 364, + 599, + 651, + 614 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "What is the outcome of the football player's action?", + "bbox": [ + 181, + 619, + 488, + 633 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Options:", + "bbox": [ + 183, + 633, + 235, + 645 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(A) the player runs out of bounds", + "(B) the player scores a touchdown", + "(C) the player drops the ball", + "(D) the player falls down", + "(E) the player gets injured", + "Answer: (B) the player scores a touchdown." + ], + "bbox": [ + 183, + 646, + 442, + 720 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Pipeline", + "text_level": 1, + "bbox": [ + 477, + 724, + 537, + 739 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg", + "image_caption": [ + "Figure 20: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. An LLM (i.e., Llama 3.3 70B) with a video caption, frame captions at 1 fps, and additional metadata (e.g., video title etc.), generates a detailed caption and multiple-choices question answers pair." 
+ ], + "image_footnote": [], + "bbox": [ + 217, + 744, + 802, + 818 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "K Qualitative Results", + "text_level": 1, + "bbox": [ + 173, + 89, + 370, + 107 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg", + "image_caption": [ + "Figure 21: PLM-8B OCR examples." + ], + "image_footnote": [], + "bbox": [ + 196, + 162, + 380, + 378 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Question: Provide a detailed description of the image.", + "bbox": [ + 416, + 130, + 741, + 143 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: The image is a pink postage stamp featuring a map of the Grenadines of St. Vincent, with a red border and black text. The map shows various locations on the island, including hotels, bays, and other points of interest.", + "bbox": [ + 416, + 143, + 844, + 194 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "*Key Features:*", + "bbox": [ + 418, + 194, + 514, + 207 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* *Map:* A pink map of the Grenadines of St. Vincent, with a red border and black text.", + "* *Locations:* Various locations on the island are marked, including hotels, bays, and other points of interest.", + "* *Scale:* A scale in miles is provided at the bottom of the map.", + "* *Denomination:* The denomination \"30c\" is printed in the bottom-right corner of the stamp.", + "* *Border:* The stamp has a red border with a serrated edge." 
+ ], + "bbox": [ + 416, + 218, + 843, + 320 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "*Overall Impression:*", + "bbox": [ + 416, + 330, + 552, + 345 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The image appears to be a vintage postage stamp from the Grenadines of St. Vincent, featuring a map of the island and various locations of interest. The stamp has a classic design and is likely from the mid-20th century.", + "bbox": [ + 416, + 357, + 844, + 407 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg", + "image_caption": [ + "Figure 22: PLM-8B OCR in the wild examples." + ], + "image_footnote": [], + "bbox": [ + 326, + 455, + 488, + 545 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Question: What does the word say together from the bikers?", + "bbox": [ + 504, + 469, + 671, + 507 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: The word says Indianapolis.", + "bbox": [ + 504, + 508, + 673, + 534 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg", + "image_caption": [ + "Figure 23: PLM-8B OCR in the wild examples." + ], + "image_footnote": [], + "bbox": [ + 364, + 595, + 450, + 713 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Question: How tall is the. 
\nkitty character in the image?", + "bbox": [ + 504, + 623, + 673, + 648 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: The kitty character is approximately $100\\mathrm{cm}$ tall.", + "bbox": [ + 504, + 648, + 673, + 686 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg", + "image_caption": [ + "Figure 24: PLM-8B Natural image QA" + ], + "image_footnote": [], + "bbox": [ + 341, + 179, + 475, + 337 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Question: What is inside the oven?", + "bbox": [ + 506, + 176, + 671, + 202 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: There is a baking sheet inside the oven.", + "bbox": [ + 506, + 203, + 671, + 227 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Question: What temperature is the oven set to?", + "bbox": [ + 506, + 228, + 674, + 252 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: The oven is set to 200 degrees.", + "bbox": [ + 506, + 253, + 671, + 277 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Question: What unit is the oven's temperature set to?", + "bbox": [ + 506, + 277, + 671, + 303 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: The oven's temperature is set to Celsius.", + "bbox": [ + 506, + 304, + 674, + 328 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg", + "image_caption": [ + "Figure 25: PLM-8B Captioning." 
+ ], + "image_footnote": [], + "bbox": [ + 303, + 547, + 511, + 797 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Question: Describe the image in details", + "bbox": [ + 553, + 553, + 722, + 578 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: A city skyline with a large body of water in the foreground. The city skyline is made up of tall buildings, some of which are skyscrapers. The buildings are densely packed together and vary in height and color. In the foreground, there is a large body of water that appears to be a harbor or bay. There are many boats and ships in the water, some of which are docked at the shore while others are sailing or anchored in the middle of the water.", + "bbox": [ + 553, + 579, + 722, + 792 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg", + "image_caption": [ + "RDCap" + ], + "image_footnote": [], + "bbox": [ + 181, + 176, + 834, + 329 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: Describe the subject's actions within the red rectangles, providing start and end frames ids and a brief description for each action in a dense caption format. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video.", + "bbox": [ + 181, + 351, + 834, + 390 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Answer: Frame [0, 6]: Out of frame", + "bbox": [ + 183, + 390, + 400, + 402 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Frame [6, 15]: A woman is walking towards the camera with a child. 
She is holding the child's hand with her right hand and carrying a bag in her left hand", + "bbox": [ + 183, + 402, + 834, + 428 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Frame [16, 31]: Out of frame.", + "bbox": [ + 183, + 428, + 361, + 440 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "RCap", + "text_level": 1, + "bbox": [ + 486, + 467, + 529, + 482 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: What is happening to the subject in the region highlighted by the red rectangle during frames [6, 15] in this 32 frame video?", + "bbox": [ + 181, + 486, + 833, + 511 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Answer: a woman is walking on a muddy road with a child. she is holding the child's hand with her right hand and adjusting her saree with her left hand while walking", + "bbox": [ + 183, + 512, + 834, + 537 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "RTLoc", + "text_level": 1, + "bbox": [ + 482, + 542, + 534, + 556 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: Please provide the start and end frame of when 'an individual standing near a water body is drinking a from a water from a bottle' happens corresponding to the region marked by the blue rectangle. Use the format [start, end] where start and end are frame numbers between 0 and 31 in this 32 frame video. Answer: [23, 26]", + "bbox": [ + 181, + 560, + 834, + 612 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Captioning", + "text_level": 1, + "bbox": [ + 467, + 614, + 550, + 630 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: Please describe the video in detail, focusing on actions.", + "bbox": [ + 181, + 633, + 575, + 648 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Answer: This video features a group of people walking around a muddy area with a small bridge in the background. The video is taken outdoors and the weather is sunny. 
The video starts with a man standing on the left side of the frame, facing the camera. He is wearing a white shirt, a black vest, black trousers and white shoes. He has a scarf around his neck. The camera pans to the right and shows a group of people walking around. The video is taken from a low angle and the camera is handheld.", + "bbox": [ + 181, + 648, + 834, + 710 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The video ends with a man walking towards the left side of the frame. He is wearing a blue shirt, black trousers and brown shoes. He is holding a bottle in his right hand and is drinking from it.", + "bbox": [ + 181, + 722, + 834, + 750 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Fine-grained QA", + "text_level": 1, + "bbox": [ + 447, + 753, + 568, + 768 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: Please explain what the man in blue t-shirt is pointing at in the video.", + "bbox": [ + 181, + 771, + 656, + 785 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Answer: The man in blue t-shirt is pointing at the bridge.", + "bbox": [ + 181, + 785, + 522, + 797 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Figure 26: PLM-8B Video Region Captioning, Grounding and Fine-grained QA.", + "bbox": [ + 230, + 806, + 761, + 823 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg", + "image_caption": [ + "Figure 27: PLM-8B Video Captioning" + ], + "image_footnote": [ + "Question: Please give me a details of the video focusing on actions and interactions.", + "Answer: This video features a close up shot of a hand holding a plush toy mouse with a grey kitten standing on a white chair, reaching out and touching the toy with its paws. 
In the beginning of the video, the kitten is seen standing on the chair and reaching out with both paws to touch the toy mouse. The kitten then withdraws its paws and sits back on the chair. The hand holding the toy mouse is seen moving the toy closer to the kitten and then away from the kitten. The kitten then reaches out again and touches the toy with its paws. The hand then moves the toy away from the kitten again and the kitten looks to the left of the frame.", + "This video is set against a white background." + ], + "bbox": [ + 181, + 207, + 834, + 280 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg", + "image_caption": [ + "Figure 28: PLM-8B Video QA" + ], + "image_footnote": [ + "Question: What are the ingredients being thrown into the mixer?", + "Answer: Carrots, apples, and bananas." + ], + "bbox": [ + 181, + 665, + 834, + 739 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "L Limitations and Future Work", + "text_level": 1, + "bbox": [ + 171, + 89, + 457, + 107 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Our PLM models achieve strong performance against open-data baselines and proprietary models alike, however there is still room for improvement in both modeling and data. On the model front, we do not experiment extensively with long video modeling components (e.g., token compression, dynamic temporal resolution). As a result, our performance on long video benchmarks [92, 94, 96] is less competitive (see Table F). PLM is compatible with such newer advancements and can be incorporated in future work.", + "bbox": [ + 169, + 119, + 826, + 204 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Additionally, our results are sensitive to the characteristics of the base LLM. 
We see especially low performance of PLM on benchmarks such as MMMU [37], MME [41] and Video-MME [75] (see Tables 3 and 4), where the strongest baselines often rely on LLMs that are more verbose, but also have a likely much larger language component (see the gap to proprietary models on some benchmarks). We also note that our model performs relatively poorly on our SGQA task (Table 5), targeting a mix of perception and knowledge based questions to smart glasses. Strong chatbot-focused systems like GPT-4o excel at tasks that go beyond core perception.", + "bbox": [ + 169, + 210, + 826, + 308 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "On the data front, our mix focuses squarely on visual perception — it does not include for example, multi-step reasoning, robotics or world-knowledge data. Despite these limitations, PLM contributes new capabilities and strong benchmark results, and set a new standard for fully reproducible VLMs.", + "bbox": [ + 169, + 313, + 826, + 357 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "M Broader Impact", + "text_level": 1, + "bbox": [ + 171, + 375, + 349, + 393 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Our work aims to advance open and reproducible research in vision-language modeling by releasing models, data, and benchmarks that support open research. By not having any distillation from proprietary models, we hope to improve reproducible and transparent training and evaluation of VLM research. However, like all MLLMs, our Perception Language Model (PLM) may have some risks. Even by carefully selecting datasets and apply several mitigation (CSAM, NSFW, etc.), the model may still contain hidden biases or generate inappropriate or harmful content. We took steps to reduce these risks by teaching the model to refuse answering questions related to bias, harassment, or adult content. 
We also remove all samples containing any mention of human faces from all the datasets.", + "bbox": [ + 169, + 406, + 826, + 517 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "We also annotate and release a large-scale dataset for fine-grained video question answering and spatio-temporal grounding. This release has the potential to significantly advance research in image and video understanding. Making the dataset openly available allows others to reproduce our work and invites broader community involvement. This transparency supports safer and more accountable progress, helping researchers better understand and address potential biases or limitations.", + "bbox": [ + 169, + 523, + 825, + 594 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "We believe that by openly sharing our models and data, while actively addressing ethical concerns, our work can contribute positively to vision-language research.", + "bbox": [ + 169, + 598, + 826, + 628 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 104 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023.", + "[2] Erfei Cui, Yinan He, Zheng Ma, Zhe Chen, Hao Tian, Weiyun Wang, Kunchang Li, Yi Wang, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, Yali Wang, Limin Wang, Yu Qiao, and Jifeng Dai. Sharegpt-4o: Comprehensive multimodal annotations with gpt-4o, 2024.", + "[3] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. In European Conference on Computer Vision, pages 370-387. 
Springer, 2024.", + "[4] Farre Miquel, Marafioti Andres, Tunstall Lewis, von Werra Leandro, Conghui He, Cuenca Pedro, and Wolf Thomas. Finevideo: behind the scenes, 2024.", + "[5] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data, 2024.", + "[6] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, Li Yuan, Yu Qiao, Dahua Lin, Feng Zhao, and Jiaqi Wang. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024.", + "[7] Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle-2: Faster inference of language models with dynamic draft trees, 2024b. URL https://arxiv.org/abs/2406.16858, 2024.", + "[8] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. Hero: Hierarchical encoder for video+ language omni-representation pre-training. arXiv preprint arXiv:2005.00200, 2020.", + "[9] Zhu Zhang, Zhou Zhao, Yang Zhao, Qi Wang, Huasheng Liu, and Lianli Gao. Where does it exist: Spatio-temporal video grounding for multi-form sentences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10668-10677, 2020.", + "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. 
arXiv preprint arXiv:2412.05271, 2024.", + "[11] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Jen Dumas, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weis, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024.", + "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava- next: Improved reasoning,OCR,and world knowledge, January 2024.", + "[13] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "[14] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Anwen Hu, Haowei Liu, Qi Qian, Ji Zhang, and Fei Huang. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13040–13051, 2024.", + "[15] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023." 
+ ], + "bbox": [ + 179, + 112, + 825, + 910 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. arXiv preprint arXiv:2204.14198, 2022.", + "[17] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pretraining for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024.", + "[18] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024.", + "[19] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024.", + "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023.", + "[21] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. 
arXiv preprint arXiv:2305.06355, 2023.", + "[22] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Khan. Videogpt+: Integrating image and video encoders for enhanced video understanding. arXiv preprint arXiv:2406.09418, 2024.", + "[23] Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023.", + "[24] Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, Xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. Kangaroo: A powerful video-language model supporting long-context video input. arXiv preprint arXiv:2408.15542, 2024.", + "[25] Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, et al. Longvu: Spatiotemporal adaptive compression for long video-language understanding. arXiv preprint arXiv:2410.17434, 2024.", + "[26] Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2025.", + "[27] Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024.", + "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.", + "[29] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024.", + "[30] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.", + "[31] Rohan Choudhury, Guanglei Zhu, Sihan Liu, Koichiro Niinuma, Kris M Kitani, and László Jeni. Don't look twice: Faster video transformers with run-length tokenization. arXiv preprint arXiv:2411.05222, 2024.", + "[32] OpenAI. Gpt-4v(ision) system card, 2023.", + "[33] OpenAI. Gpt-4o system card, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[34] Gemini Team Google. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.", + "[35] Gemini Team Google. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.", + "[36] Anthropic. The claude 3 model family: Opus, sonnet, haiku. 2024.", + "[37] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9556-9567, 2024.", + "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. 
Springer, 2024.", + "[39] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge, 2022.", + "[40] Jeffrey P Bigham, Chandrika Jayant, Hanjie Ji, Greg Little, Andrew Miller, Robert C Miller, Robin Miller, Aubrey Tatarowicz, Brandyn White, Samual White, et al. Vizwiz: nearly real-time answers to visual questions. In Proceedings of the 23nd annual ACM symposium on User interface software and technology, pages 333-342, 2010.", + "[41] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023.", + "[42] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023.", + "[43] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.", + "[44] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. In European Conference on Computer Vision, pages 148-166, 2025.", + "[45] xai. RealworldQA benchmark. https://huggingface.co/datasets/xai-org/RealworldQA, 2024.", + "[46] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024.", + "[47] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. 
arXiv preprint arXiv:2405.01483, 2024.", + "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024.", + "[49] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.", + "[50] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8948-8957, 2019.", + "[51] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2014.", + "[52] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019." + ], + "bbox": [ + 181, + 90, + 825, + 911 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[53] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. Docvqa: A dataset for vqa on document images. In 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2199-2208, 2021.", + "[54] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. 
Advancing chart question answering with robust chart component recognition. arXiv preprint arXiv:2407.21038, 2024.", + "[55] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016.", + "[56] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C. V. Jawahar. Infographicvqa. In 2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 2582-2591, 2022.", + "[57] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024.", + "[58] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023.", + "[59] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024.", + "[60] Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. From recognition to cognition: Visual commonsense reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6720-6731, 2019.", + "[61] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 2507-2521. Curran Associates, Inc., 2022.", + "[62] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. 
Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186, 2025.", + "[63] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023.", + "[64] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024.", + "[65] Jierun Chen, Fangyun Wei, Jinjing Zhao, Sizhe Song, Bohuai Wu, Zhuoxuan Peng, S-H Gary Chan, and Hongyang Zhang. Revisiting referring expression comprehension evaluation in the era of large multimodal models. arXiv preprint arXiv:2406.16866, 2024.", + "[66] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017.", + "[67] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024.", + "[68] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023.", + "[69] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021.", + "[70] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024." + ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[71] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024.", + "[72] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. Star: A benchmark for situated reasoning in real-world videos. In Thirty-fifth Conference on Neural Information Processing Systems (NeurIPS), 2021.", + "[73] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. Tgif-qa: Toward spatiotemporal reasoning in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2758–2766, 2017.", + "[74] Jie Lei, Licheng Yu, Mohit Bansal, and Tamara L Berg. Tvqa: Localized, compositional video question answering. arXiv preprint arXiv:1809.01696, 2018.", + "[75] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal ILms in video analysis. arXiv preprint arXiv:2405.21075, 2024.", + "[76] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. 
Activitynet-qa: A dataset for understanding complex web videos via question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9127–9134, 2019.", + "[77] Munan Ning, Bin Zhu, Yujia Xie, Bin Lin, Jiaxi Cui, Lu Yuan, Dongdong Chen, and Li Yuan. Video-bench: A comprehensive benchmark and toolkit for evaluating video-based large language models. arXiv preprint arXiv:2311.16103, 2023.", + "[78] Jianrui Zhang, Mu Cai, and Yong Jae Lee. Vinoground: Scrutinizing Imms over dense temporal reasoning with short videos. arXiv preprint arXiv:2410.02763, 2024.", + "[79] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024.", + "[80] Daniel Cores, Michael Dorkenwald, Manuel Mucientes, Cees GM Snoek, and Yuki M Asano. Tvbench: Redesigning video-language evaluation. arXiv preprint arXiv:2410.07752, 2024.", + "[81] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016.", + "[82] David Chen and William B Dolan. Collecting highly parallel data for paraphrase evaluation. In Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies, pages 190-200, 2011.", + "[83] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI conference on artificial intelligence, volume 32, 2018.", + "[84] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 4581-4591, 2019.", + "[85] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017.", + "[86] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024.", + "[87] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D Manning. Auroracap: Efficient, performant video detailed captioning and a new benchmark. arXiv preprint arXiv:2410.03051, 2024.", + "[88] Yuxuan Wang, Yueqian Wang, Dongyan Zhao, Cihang Xie, and Zilong Zheng. Videohallucer: Evaluating intrinsic and extrinsic hallucinations in large video-language models. arXiv preprint arXiv:2406.16338, 2024.", + "[89] Jiacheng Zhang, Yang Jiao, Shaoxiang Chen, Jingjing Chen, and Yu-Gang Jiang. Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597, 2024." + ], + "bbox": [ + 181, + 90, + 825, + 911 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[90] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2024.", + "[91] Ruchit Rawal, Khalid Saifullah, Miquel Farré, Ronen Basri, David Jacobs, Gowthami Somepalli, and Tom Goldstein. Cinepile: A long video question answering dataset and benchmark. arXiv preprint arXiv:2405.08813, 2024.", + "[92] Weihan Wang, Zehai He, Wenyi Hong, Yean Cheng, Xiaohan Zhang, Ji Qi, Xiaotao Gu, Shiyu Huang, Bin Xu, Yuxiao Dong, et al. 
Lvbench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024.", + "[93] Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Movieqa: Understanding stories in movies through question-answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4631–4640, 2016.", + "[94] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37:28828-28857, 2025.", + "[95] Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18221-18232, 2024.", + "[96] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024.", + "[97] Guo Chen, Yicheng Liu, Yifei Huang, Yuping He, Baoqi Pei, Jilan Xu, Yali Wang, Tong Lu, and Limin Wang. Cg-bench: Clue-grounded question answering benchmark for long video understanding. arXiv preprint arXiv:2412.12075, 2024.", + "[98] Orr Zohar, Xiaohan Wang, Yann Dubois, Nikhil Mehta, Tong Xiao, Philippe Hansen-Estruch, Licheng Yu, Xiaofang Wang, Felix Juefei-Xu, Ning Zhang, et al. Apollo: An exploration of video understanding in large multimodal models. arXiv preprint arXiv:2412.10360, 2024.", + "[99] Mu Cai, Reuben Tan, Jianrui Zhang, Bocheng Zou, Kai Zhang, Feng Yao, Fangrui Zhu, Jing Gu, Yiwu Zhong, Yuzhang Shang, et al. Temporalbench: Benchmarking fine-grained temporal understanding for multimodal video models. 
arXiv preprint arXiv:2410.10818, 2024.", + "[100] Ziyao Shangguan, Chuhan Li, Yuxuan Ding, Yanan Zheng, Yilun Zhao, Tesca Fitzgerald, and Arman Cohan. Tomato: Assessing visual temporal reasoning capabilities in multimodal foundation models. arXiv preprint arXiv:2410.23266, 2024.", + "[101] Wenyi Hong, Yean Cheng, Zhuoyi Yang, Weihan Wang, Lefan Wang, Xiaotao Gu, Shiyu Huang, Yuxiao Dong, and Jie Tang. Motionbench: Benchmarking and improving fine-grained video motion understanding for vision language models. arXiv preprint arXiv:2501.02955, 2025.", + "[102] Yuanxin Liu, Shicheng Li, Yi Liu, Yuxiang Wang, Shuhuai Ren, Lei Li, Sishuo Chen, Xu Sun, and Lu Hou. Tempcompass: Do video llms really understand videos? arXiv preprint arXiv:2403.00476, 2024.", + "[103] Mohammadreza Salehi, Jae Sung Park, Tanush Yadav, Aditya Kusupati, Ranjay Krishna, Yejin Choi, Hannaneh Hajishirzi, and Ali Farhadi. Actionatlas: A videoqa benchmark for domain-specialized action recognition. arXiv preprint arXiv:2410.05774, 2024.", + "[104] Daniel Bolya, Po-Yao Huang, Peize Sun, Jang Hyun Cho, Andrea Madotto, Chen Wei, Tengyu Ma, Jiale Zhi, Jathushan Rajasegaran, Hanoona Rasheed, et al. Perception encoder: The best visual embeddings are not at the output of the network. arXiv preprint arXiv:2504.13181, 2025.", + "[105] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023.", + "[106] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." 
+ ], + "bbox": [ + 171, + 90, + 828, + 912 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[107] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Intervl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024.", + "[108] Brandon Castellano. PySceneDetect.", + "[109] Ahmed Masry, Do Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Findings of the Association for Computational Linguistics: ACL 2022, pages 2263-2279, Dublin, Ireland, May 2022. Association for Computational Linguistics.", + "[110] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019.", + "[111] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017.", + "[112] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019.", + "[113] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017.", + "[114] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. 
Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019.", + "[115] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonio Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, Miguel Martin, Tushar Nagarajan, Ilija Radosavovic, Santhosh Kumar Ramakrishnan, Fiona Ryan, Jayant Sharma, Michael Wray, Mengmeng Xu, Eric Zhongcong Xu, Chen Zhao, Siddhant Bansal, Dhruv Batra, Vincent Cartillier, Sean Crane, Tien Do, Morrie Doulaty, Akshay Erapalli, Christoph Feichtenhofer, Adriano Fragomeni, Qichen Fu, Abraham Gebreselasie, Cristina Gonzalez, James Hillis, Xuhua Huang, Yifei Huang, Wenqi Jia, Weslie Khoo, Jachym Kolar, Satwik Kottur, Anurag Kumar, Federico Landini, Chao Li, Yanghao Li, Zhenqiang Li, Karttikeya Mangalam, Raghava Modhugu, Jonathan Munro, Tullie Murrell, Takumi Nishiyasu, Will Price, Paola Ruiz Puentes, Merey Ramazanova, Leda Sari, Kiran Somasundaram, Audrey Southerland, Yusuke Sugano, Ruijie Tao, Minh Vo, Yuchen Wang, Xindi Wu, Takuma Yagi, Ziwei Zhao, Yunyi Zhu, Pablo Arbelaez, David Crandall, Dima Damen, Giovanni Maria Farinella, Christian Fuegen, Bernard Ghanem, Vamsi Krishna Ithapu, C. V. Jawahar, Hanbyul Joo, Kris Kitani, Haizhou Li, Richard Newcombe, Aude Oliva, Hyun Soo Park, James M. Rehg, Yoichi Sato, Jianbo Shi, Mike Zheng Shou, Antonio Torralba, Lorenzo Torresani, Mingfei Yan, and Jitendra Malik. Ego4d: Around the world in 3,000 hours of egocentric video. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.", + "[116] Kristen Grauman, Andrew Westbury, Lorenzo Torresani, Kris Kitani, Jitendra Malik, Triantafyllos Afouras, Kumar Ashutosh, Vijay Baiyya, Siddhant Bansal, Bikram Boote, Eugene Byrne, Zachary Chavis, Joya Chen, Feng Cheng, Fu-Jen Chu, Sean Crane, Avijit Dasgupta, Jing Dong, María Escobar, Cristhian Forigua, Abraham Kahsay Gebreselasie, Sanjay Haresh, Jing Huang, Md Mohaiminul Islam, Suyog Dutt Jain, Rawal Khirodkar, Devansh Kukreja, Kevin J Liang, Jia-Wei Liu, Sagnik Majumder, Yongsen Mao, Miguel Martin, Effrosyni Mavroudi, Tushar Nagarajan, Francesco Ragusa, Santhosh K. Ramakrishnan, Luigi Seminara, Arjun Somayazulu, Yale Song, Shan Su, Zihui Xue, Edward Zhang, Jinxu Zhang, Angela Castillo, Changan Chen, Xinzhu Fu, Ryosuke Furuta, Cristina Gonzalez, Prince Gupta, Jiabo Hu, Yifei Huang, Yiming Huang, Weslie Khoo, Anush Kumar, Robert Kuo, Sach Lakhavani, Miao Liu, Mingjing Luo, Zhengyi Luo, Brighid Meredith, Austin Miller, Oluwatuminu Oguntola, Xiaqing Pan, Penny Peng, Shraman Pramanick, Merey Ramazanova, Fiona Ryan, Wei Shan, Kiran Somasundaram, Chenan Song, Audrey Southerland, Masatoshi Tateno, Huiyu Wang, Yuchen Wang, Takuma Yagi, Mingfei Yan, Xitong Yang, Zecheng Yu, Shengxin Cindy Zha, Chen Zhao, Ziwei Zhao, Zhifan Zhu, Jeff Zhuo, Pablo Arbeláez, Gedas Bertasius, David J. Crandall, Dima Damen, Jakob Julian Engel, Giovanni Maria Farinella, Antonino Furnari, Bernard Ghanem, Judy Hoffman, C. V. Jawahar, Richard A. Newcombe, Hyun Soo Park, James M. Rehg, Yoichi Sato, Manolis Savva, Jianbo Shi, Mike Zheng Shou, and Michael Wray. Ego-exo4d: Understanding skilled human activity from first- and third-person perspectives. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19383-19400, 2023." 
+ ], + "bbox": [ + 173, + 90, + 825, + 911 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[117] Yansong Tang, Dajun Wang, Zhenyu Xu, Jingjing Liu, Xiaoyong Wang, Xing Gao, Jinhui Tang, and Dong Wu. Coin: A large-scale dataset for comprehensive instructional video analysis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.", + "[118] Dimitri Zhukov, Jean-Baptiste Alayrac, Chen Sun, Ivan Laptev, Cordelia Schmid, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.", + "[119] Thong Thanh Nguyen, Zhiyuan Hu, Xiaobao Wu, Cong-Duy T Nguyen, See-Kiong Ng, and Anh Tuan Luu. Encoding and controlling global semantics for long-form video question answering. arXiv preprint arXiv:2405.19723, 2024.", + "[120] Kexin Yi, Chuang Gan, Yunzhu Li, Pushmeet Kohli, Jiajun Wu, Antonio Torralba, and Joshua B Tenenbaum. Clevrer: Collision events for video representation and reasoning. arXiv preprint arXiv:1910.01442, 2019.", + "[121] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017.", + "[122] Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, et al. The\" something something\" video database for learning and evaluating visual common sense. In Proceedings of the IEEE international conference on computer vision, pages 5842-5850, 2017.", + "[123] Paul Voigtlaender, Soravit Changpinyo, Jordi Pont-Tuset, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with video localized narratives. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2461-2471, 2023.", + "[124] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024.", + "[125] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015.", + "[126] Soichiro Fujita, Tsutomu Hirao, Hidetakam Kamigaito, Manabu Okumura, and Masaaki Nagata. Soda: Story oriented dense video captioning evaluation framework. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VI 16, pages 517-531. Springer, 2020.", + "[127] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 11:635-651, 2023.", + "[128] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.", + "[129] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023.", + "[130] Tri Dao. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691, 2023.", + "[131] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Alan Lerer. Automatic differentiation in pytorch, 2017.", + "[132] Montalvo Pablo and Wightman Ross. PDF association dataset (pdfa), 2024.", + "[133] Montalvo Pablo and Wightman Ross. 
Industry documents library (idl), 2024.", + "[134] Lei Li, Yuqi Wang, Runxin Xu, Peiyi Wang, Xiachong Feng, Lingpeng Kong, and Qi Liu. Multimodal arxiv: A dataset for improving scientific comprehension of large vision-language models. arXiv preprint arXiv:2403.00231, 2024.", + "[135] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[136] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, Tom Duerig, and Vittorio Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. IJCV, 2020.", + "[137] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in neural information processing systems, 34:23634-23651, 2021.", + "[138] Mathew Monfort, SouYoung Jin, Alexander Liu, David Harwath, Rogerio Feris, James Glass, and Aude Oliva. Spoken moments: Learning joint audio-visual representations from video descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14871–14881, 2021.", + "[139] Gunnar A Sigurdsson, Gúl Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. 
In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 510-526. Springer, 2016.", + "[140] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pages 5803-5812, 2017.", + "[141] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with $2.8\\mathrm{m}$ challenging questions. arXiv preprint arXiv:2502.13124, 2025.", + "[142] Kushal Kafle, Scott Cohen, Brian Price, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In CVPR, 2018.", + "[143] Nitesh Methani, Pritha Ganguly, Mitesh M. Khapra, and Pratyush Kumar. Plotqa: Reasoning over scientific plots. In The IEEE Winter Conference on Applications of Computer Vision (WACV), March 2020.", + "[144] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps, 2022.", + "[145] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 947-952, 2019.", + "[146] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020.", + "[147] Samira Ebrahimi Kahou, Vincent Michalski, Adam Atkinson, Akos Kadar, Adam Trischler, and Yoshua Bengio. Figureqa: An annotated figure dataset for visual reasoning, 2018.", + "[148] Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 
The hateful memes challenge: Detecting hate speech in multimodal memes. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 2611-2624. Curran Associates, Inc., 2020.", + "[149] Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning, 2016.", + "[150] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. In The 35th Conference on Neural Information Processing Systems (NeurIPS) Track on Datasets and Benchmarks, 2021.", + "[151] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning, 2023.", + "[152] Yilun Zhao, Chen Zhao, Linyong Nan, Zhenting Qi, Wenlin Zhang, Xiangru Tang, Boyu Mi, and Dragomir Radev. Robut: A systematic study of table qa robustness against human-annotated adversarial perturbations. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6064–6081, Toronto, Canada, July 2023. Association for Computational Linguistics." + ], + "bbox": [ + 173, + 90, + 825, + 911 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[153] Hugo Laurençon, Léo Tronchon, and Victor Sanh. Unlocking the conversion of web screenshots into html code with the websight dataset, 2024.", + "[154] Yuke Zhu, Oliver Groth, Michael Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. 
In IEEE Conference on Computer Vision and Pattern Recognition, 2016.", + "[155] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In AAAI, 2019.", + "[156] Jonas Belouadi, Anne Lauscher, and Steffen Eger. Automatikz: Text-guided synthesis of scientific vector graphics with tikz, 2024.", + "[157] Mengye Ren, Ryan Kiros, and Richard Zemel. Exploring models and data for image question answering. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015.", + "[158] Jason Obeid and Enamul Hoque. Chart-to-text: Generating natural language descriptions for charts by adapting the transformer model. In Brian Davis, Yvette Graham, John Kelleher, and Yaji Sripada, editors, Proceedings of the 13th International Conference on Natural Language Generation, pages 138-147, Dublin, Ireland, December 2020. Association for Computational Linguistics.", + "[159] Benny J. Tang, Angie Boggust, and Arvind Satyanarayan. Vistext: A benchmark for semantically rich chart captioning. In The Annual Meeting of the Association for Computational Linguistics (ACL), 2023.", + "[160] Zhiyu Chen, Wenhu Chen, Charese Smiley, Sameena Shah, Iana Borova, Dylan Langdon, Reema Moussa, Matt Beane, Ting-Hao Huang, Bryan Routledge, and William Yang Wang. Finqa: A dataset of numerical reasoning over financial data. In Marie-Francine Moens, Xuanjing Huang, Lucia Specia, and Scott Wen-tau Yih, editors, Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3697-3711, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics.", + "[161] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marcal Rusinol, C.V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. 
In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4290-4300, 2019.", + "[162] Fengbin Zhu, Wenqiang Lei, Youcheng Huang, Chao Wang, Shuo Zhang, Jiancheng Lv, Fuli Feng, and Tat-Seng Chua. Tat-qa: A question answering benchmark on a hybrid of tabular and textual content in finance. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3277–3287, Online, August 2021. Association for Computational Linguistics.", + "[163] Chris Wendler. Renderedtext, 2024.", + "[164] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. Raven: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.", + "[165] Urs-Viktor Marti and H. Bunke. Theiam-database:An english sentence database for offline handwriting recognition.International Journal on Document Analysis and Recognition,5:39-46,11 2002.", + "[166] Pan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. Dynamic prompt learning via policy gradient for semi-structured mathematical reasoning. In International Conference on Learning Representations (ICLR), 2023.", + "[167] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension, 2020.", + "[168] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, UIST '21, page 498-510, New York, NY, USA, 2021. Association for Computing Machinery.", + "[169] Fangyu Liu, Guy Emerson, and Nigel Collier. 
Visual spatial reasoning, 2023.", + "[170] Aniruddha Kembhavi, Minjoon Seo, Dustin Schwenk, Jonghyun Choi, Ali Farhadi, and Hannaneh Hajishirzi. Are you smarter than a sixth grader? textbook question answering for multimodal machine comprehension. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5376-5384, 2017." + ], + "bbox": [ + 173, + 90, + 826, + 910 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[171] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. Visualmrc: Machine reading comprehension on document images. In AAAI, 2021.", + "[172] Jason Lau, Soumya Gayen, Asma Ben Abacha, and Dina Demner-Fushman. A dataset of clinically generated visual questions and answers about radiology images. Scientific Data, 5:180251, 11 2018.", + "[173] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, Jian-Guang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio, editors, Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1094-1110, Dublin, Ireland, May 2022. Association for Computational Linguistics.", + "[174] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. In The Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021), 2021.", + "[175] Diagram image to text dataset, 2023.", + "[176] Bo Li, Yuanhan Zhang, Liangyu Chen, Jinghao Wang, Fanyi Pu, Jingkang Yang, Chunyuan Li, and Ziwei Liu. 
Mimic-it: Multi-modal in-context instruction tuning, 2023.", + "[177] Yilun Zhao, Yunxiang Li, Chenying Li, and Rui Zhang. Multihiertt: Numerical reasoning over multi hierarchical tabular and textual data. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6588-6600, Dublin, Ireland, May 2022. Association for Computational Linguistics.", + "[178] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Anna Korhonen, David Traum, and Lluis Márquez, editors, Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6418-6428, Florence, Italy, July 2019. Association for Computational Linguistics.", + "[179] Harsh Jhamtani et al. Learning to describe differences between pairs of similar images. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4024-4034, Brussels, Belgium, October-November 2018. Association for Computational Linguistics.", + "[180] Haoping Bai, Shancong Mou, Tatiana Likhomanenko, Ramazan Gokberk Cinbis, Oncel Tuzel, Ping Huang, Jiulong Shan, Jianjun Shi, and Meng Cao. Vision datasets: A benchmark for vision-based industrial inspection. arXiv preprint arXiv:2306.07890, 2023.", + "[181] Tanmay Gupta, Dustin Schwenk, Ali Farhadi, Derek Hoiem, and Aniruddha Kembhavi. Imagine this! scripts to compositions to videos. In Proceedings of the European conference on computer vision (ECCV), pages 598-613, 2018.", + "[182] Benno Krojer, Vaibhav Adlakha, Vibhav Vineet, Yash Goyal, Edoardo Ponti, and Siva Reddy. Image retrieval from contextual descriptions. arXiv preprint arXiv:2203.15867, 2022.", + "[183] Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1383-1391, 2015.", + "[184] Yingshan Chang, Mridu Narang, Hisami Suzuki, Guihong Cao, Jianfeng Gao, and Yonatan Bisk. Webqa: Multihop and multimodal qa. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16495-16504, 2022.", + "[185] Maxwell Forbes, Christine Kaeser-Chen, Piyush Sharma, and Serge Belongie. Neural naturalist: Generating fine-grained image comparisons. arXiv preprint arXiv:1909.04101, 2019.", + "[186] Hareesh Ravi, Kushal Kafle, Scott Cohen, Jonathan Brandt, and Mubbasir Kapadia. Aesop: Abstract encoding of stories, objects, and pictures. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2052-2063, 2021.", + "[187] Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Nazli Ikizler-Cinbis. Recipeqa: A challenge dataset for multimodal comprehension of cooking recipes. arXiv preprint arXiv:1809.00812, 2018.", + "[188] Dong Huk Park, Trevor Darrell, and Anna Rohrbach. Robust change captioning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4624-4633, 2019." + ], + "bbox": [ + 173, + 90, + 825, + 912 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 49 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[189] Rumeysa Bodur, Erhan Gundogdu, Binod Bhattarai, Tae-Kyun Kim, Michael Donoser, and Loris Bazzani. iedit: Localised text-guided image editing with weak supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7426-7435, 2024.", + "[190] Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. 
In Chengqing Zong and Michael Strube, editors, Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1470–1480, Beijing, China, July 2015. Association for Computational Linguistics.", + "[191] Ye Yuan, Xiao Liu, Wondimu Dikubab, Hui Liu, Zhilong Ji, Zhongqin Wu, and Xiang Bai. Syntax-aware network for handwritten mathematical expression recognition. arXiv preprint arXiv:2203.01601, 2022.", + "[192] Yasumasa Onoe, Sunayana Rane, Zachary Berger, Yonatan Bitton, Jaemin Cho, Roopal Garg, Alexander Ku, Zarana Parekh, Jordi Pont-Tuset, Garrett Tanzer, et al. Docci: Descriptions of connected and contrasting images. In European Conference on Computer Vision, pages 291-309. Springer, 2024.", + "[193] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating clip-style models on dense captions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26700-26709, 2024.", + "[194] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen-tau Yih, et al. Altogether: Image captioning via re-aligning alt-text. arXiv preprint arXiv:2410.17251, 2024.", + "[195] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019.", + "[196] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. 
In Proceedings of the IEEE international conference on computer vision, pages 2641–2649, 2015.", + "[197] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 787-798, 2014.", + "[198] Xiyao Wang, Yuhang Zhou, Xiaoyu Liu, Hongjin Lu, Yuancheng Xu, Feihong He, Jaehong Yoon, Taixi Lu, Gedas Bertasius, Mohit Bansal, Huaxiu Yao, and Furong Huang. Mementos: A comprehensive benchmark for multimodal large language model reasoning over image sequences, 2024.", + "[199] Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23056-23065, 2023.", + "[200] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021.", + "[201] Nazneen Rajani, Lewis Tunstall, Edward Beeching, Nathan Lambert, Alexander M. Rush, and Thomas Wolf. No robots. https://huggingface.co/datasets/HuggingFaceH4/no Robots, 2023.", + "[202] Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel-Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. Mathqa: Towards interpretable math word problem solving with operation-based formalisms. arXiv preprint arXiv:1905.13319, 2019.", + "[203] Chunting Zhou, Pengfei Liu, Puxin Xu, Srinivasan Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. Lima: Less is more for alignment. 
Advances in Neural Information Processing Systems, 36:55006-55021, 2023.", + "[204] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "[205] Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. Finetuned language models are zero-shot learners. In International Conference on Learning Representations." + ], + "bbox": [ + 173, + 90, + 826, + 911 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[206] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023.", + "[207] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024.", + "[208] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575, 2015.", + "[209] Kaichen Zhang, Bo Li, Peiyuan Zhang, Fanyi Pu, Joshua Adrian Cahyono, Kairui Hu, Shuai Liu, Yuhan Zhang, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Reality check on the evaluation of large multimodal models, 2024.", + "[210] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multimodality models. 
In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024.", + "[211] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023.", + "[212] Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Zehuan Yuan, Ping Luo, and Huchuan Lu. Universal instance perception as object discovery and retrieval. In CVPR, 2023.", + "[213] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172, 2023.", + "[214] Jang Hyun Cho, Boris Ivanovic, Yulong Cao, Edward Schmerling, Yue Wang, Xinshuo Weng, Boyi Li, Yurong You, Philipp Kraehenbuehl, Yan Wang, and Marco Pavone. Language-image models with 3d understanding. In The Thirteenth International Conference on Learning Representations, 2025.", + "[215] Yale Song, Eugene Byrne, Tushar Nagarajan, Huiyu Wang, Miguel Martin, and Lorenzo Torresani. Ego4d goal-step: Toward hierarchical understanding of procedural activities. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 38863-38886. Curran Associates, Inc., 2023.", + "[216] Triantafyllos Afouras, Effrosyni Mavroudi, Tushar Nagarajan, Huiyu Wang, and Lorenzo Torresani. HT-step: Aligning instructional articles with how-to videos. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023.", + "[217] Effrosyni Mavroudi, Triantafyllos Afouras, and Lorenzo Torresani. Learning to ground instructional articles in videos through narrations. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 15201-15213, October 2023.", + "[218] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024.", + "[219] Hyolim Kang, Jinwoo Kim, Taehyun Kim, and Seon Joo Kim. Uboco: Unsupervised boundary contrastive learning for generic event boundary detection. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20041-20050, 2021.", + "[220] Zexing Du, Xue Wang, Guoqing Zhou, and Qing Wang. Fast and unsupervised action boundary detection for action segmentation. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3313-3322, 2022.", + "[221] PySceneDetect: Video Cut Detection and Analysis Tool, https://github.com/breakthrough/pyscenedetect.", + "[222] J. S. Chung and A. Zisserman. Out of time: automated lip sync in the wild. In Workshop on Multi-view Lip-reading, ACCV, 2016." + ], + "bbox": [ + 173, + 90, + 825, + 911 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[223] Zi-Yi Dou, Xitong Yang, Tushar Nagarajan, Huiyu Wang, Jing Huang, Nanyun Peng, Kris Kitani, and Fu-Jen Chu. Unlocking exocentric video-language data for egocentric video representation learning. ArXiv, abs/2408.03567, 2024.", + "[224] Dandan Shan, Jiaqi Geng, Michelle Shu, and David Fouhey. Understanding human hands in contact at internet scale. In CVPR, 2020.", + "[225] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. 
In Proceedings of the 37th International Conference on Neural Information Processing Systems, 2023.", + "[226] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021.", + "[227] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. Perceiver io: A general architecture for structured inputs & outputs. ICLR, 2022.", + "[228] F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung. A benchmark dataset and evaluation methodology for video object segmentation. In Computer Vision and Pattern Recognition, 2016.", + "[229] Sergi Caelles, Jordi Pont-Tuset, Federico Perazzi, Alberto Montes, Kevis-Kokitsi Maninis, and Luc Van Gool. The 2019 davis challenge on vos: Unsupervised multi-object segmentation. arXiv:1905.00737, 2019.", + "[230] Yan Yan, Chenliang Xu, Dawen Cai, and Jason J Corso. Weakly supervised actor-action segmentation via robust multi-task ranking. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1298-1307, 2017.", + "[231] Ujjal Kr Dutta, Mehrtash Harandi, and Chellu Chandra Sekhar. Unsupervised deep metric learning via orthogonality based probabilistic loss. IEEE Transactions on Artificial Intelligence, 1(1):74-84, 2020.", + "[232] Luowei Zhou, Yannis Kalantidis, Xinlei Chen, Jason J Corso, and Marcus Rohrbach. Grounded video description. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6578-6587, 2019.", + "[233] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020.", + "[234] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021.", + "[235] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2694-2703, 2023.", + "[236] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16375-16387, 2022." 
+ ], + "bbox": [ + 173, + 90, + 825, + 739 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 52 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_model.json b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6c06b3cdd31223d7826f087bcaac7f8c6ebbe166 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_model.json @@ -0,0 +1,9997 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.282, + 0.058, + 0.716 + ], + "angle": 270, + "content": "arXiv:2504.13180v3 [cs.CV] 23 Jul 2025" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.123, + 0.803, + 0.175 + ], + "angle": 0, + "content": "PerceptionLM: Open-Access Data and Models for Detailed Visual Understanding" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.206, + 0.836, + 0.313 + ], + "angle": 0, + "content": "Jang Hyun Cho\\(^{1,2,\\ast,\\dagger}\\), Andrea Madotto\\(^{1,\\ast}\\), Effrosyni Mavroudi\\(^{1,\\ast}\\), Triantafyllos Afouras\\(^{1,\\ast}\\), Tushar Nagarajan\\(^{1,\\ast}\\), Muhammad Maaz\\(^{3,\\ast,\\dagger}\\), Yale Song\\(^{1,\\ast}\\), Tengyu Ma\\(^{1,\\ast}\\), Shuming Hu\\(^{1,\\ast}\\), Suyog Jain\\(^{1}\\), Miguel Martin\\(^{1}\\), Huiyu Wang\\(^{1}\\), Hanoona Rasheed\\(^{3,\\dagger}\\), Peize Sun\\(^{1}\\), Po-Yao Huang\\(^{1}\\), Daniel Bolya\\(^{1}\\), Nikhila Ravi\\(^{1}\\), Shashank Jain\\(^{4}\\), Tammy Stark\\(^{4}\\), Shane Moon\\(^{4}\\), Babak Damavandi\\(^{4}\\), Vivian Lee\\(^{1}\\), Andrew Westbury\\(^{1}\\), Salman Khan\\(^{3}\\), Philipp Krähenbuhl\\(^{2}\\), Piotr Dólar\\(^{1}\\), Lorenzo Torresani\\(^{1,\\star}\\), Kristen Grauman\\(^{1,2,\\star}\\), Christoph Feichtenhofer\\(^{1,\\star}\\)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.318, + 0.605, + 0.335 + ], + "angle": 0, + 
"content": "\\(^{1}\\)Meta FAIR \\(^{2}\\)UT Austin \\(^{3}\\)MBZUAI \\(^{4}\\)Meta Reality Labs" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.34, + 0.686, + 0.358 + ], + "angle": 0, + "content": "*Joint first author †Work done during internships at Meta *Project lead" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.39, + 0.538, + 0.406 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.421, + 0.769, + 0.644 + ], + "angle": 0, + "content": "Vision-language models are integral to computer vision research, yet many high-performing models remain closed-source, obscuring their data, design and training recipe. The research community has responded by using distillation from black-box models to label training data, achieving strong benchmark results, at the cost of measurable scientific progress. However, without knowing the details of the teacher model and its data sources, scientific progress remains difficult to measure. In this paper, we study building a Perception Language Model (PLM) in a fully open and reproducible framework for transparent research in image and video understanding. We analyze standard training pipelines without distillation from proprietary models and explore large-scale synthetic data to identify critical data gaps, particularly in detailed video understanding. To bridge these gaps, we release 2.8M human-labeled instances of fine-grained video question-answer pairs and spatio-temporally grounded video captions. Additionally, we introduce PLM-VideoBench, a suite for evaluating challenging video understanding tasks focusing on the ability to reason about \"what\", \"where\", \"when\", and \"how\" of a video. We make our work fully reproducible by providing data, training recipes, code & models." 
+ }, + { + "type": "text", + "bbox": [ + 0.232, + 0.648, + 0.668, + 0.664 + ], + "angle": 0, + "content": "GitHub: https://github.com/facebookresearch/perception_models" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.683, + 0.314, + 0.699 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.713, + 0.827, + 0.799 + ], + "angle": 0, + "content": "Vision-language models (VLMs) are now a key part of computer vision research and are widely used in both academia and industry. Many of the strongest performing VLMs are closed-source, meaning their design, training methods, and the data they use are not publicly shared. To stay competitive, the research community has started to catch up to the proprietary models by using a straightforward approach — distillation from black-box models [1, 2, 3, 4, 5], where proprietary models are directly used to label training data [3, 6, 7], directly leading to strong benchmark results." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.803, + 0.828, + 0.903 + ], + "angle": 0, + "content": "Although distillation will unlock strong performance, there are two main issues for basic research. First, it makes it hard to track scientific progress. Specifically, we cannot tell if better results on benchmarks are due to advances in model design or training, or simply because the proprietary teacher models were trained on the evaluation sets of widely used benchmarks or internal data collected to resemble them — this information is not available. Second, the heavy reliance on distillation leads to a fundamental misunderstanding of the effectiveness of current methods for training VLMs from scratch. 
Several key questions remain unanswered, including the significance of each training stage," + }, + { + "type": "footer", + "bbox": [ + 0.173, + 0.923, + 0.249, + 0.939 + ], + "angle": 0, + "content": "Meta" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.089, + 0.825, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.203, + 0.825, + 0.242 + ], + "angle": 0, + "content": "Figure 1: We introduce the largest collection of manually annotated fine-grained activity QA and spatiotemporal captioning data (left panel). Together with this data, we train and release PLM —open and fully reproducible models to facilitate research in vision-language model training (right panel)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.261, + 0.827, + 0.29 + ], + "angle": 0, + "content": "the influence of synthetic data, the data gaps that the research community should prioritize, and which of these gaps are currently being artificially addressed by distillation from proprietary models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.295, + 0.825, + 0.392 + ], + "angle": 0, + "content": "To better understand these challenges, we develop the Perception Language Model (PLM), a fully open and reproducible model for transparent research in image and video understanding (Fig. 1 right). PLM consists of a vision encoder with a small scale (<8B parameters) LLM decoder. We start by an analysis of standard training pipelines with available data, without any proprietary model distillation. We investigate large-scale synthetic data and establish key scaling laws to identify critical data gaps that limit video understanding performance, especially for spatio-temporal reasoning and fine-grained understanding tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.399, + 0.827, + 0.496 + ], + "angle": 0, + "content": "To fill these gaps, we create 2.8M high-quality human-labeled instances of fine-grained video QA and spatio-temporally grounded video captions, see Fig. 1. This release is nearly an order of magnitude larger than the largest existing video datasets of each type [8, 9]. Our model, dataset and benchmark push the boundaries of video understanding, and provide a foundation for reproducible and transparent training and evaluation of VLM research. Across 40 image and video benchmarks, we achieve comparable performance with existing state-of-the-art open-weight models (e.g., InternVL2.5 [10]), without distilling from proprietary models, and greatly outperform fully open models (i.e., Molmo [11])." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.52, + 0.323, + 0.535 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.554, + 0.827, + 0.637 + ], + "angle": 0, + "content": "Vision-Language Models. Building on the strengths of large language models (LLMs), several vision-language models (VLMs) have recently been proposed for image understanding [1, 12, 13, 14, 15, 16, 17, 18, 19], video understanding [20, 21, 22, 23, 24, 25, 26, 27] and joint understanding of both images and videos [10, 28, 29, 30]. These works employ several modeling advancements such as dynamic high resolution inputs [12], adaptive token compression [25, 31], and multimodal positional embeddings [30]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.657, + 0.827, + 0.74 + ], + "angle": 0, + "content": "Open source, open data VLMs. Training data is a key component in developing powerful VLMs. Many existing approaches train on proprietary data that is not released to the community [32, 33, 34, 35, 36] or on data generated using proprietary models (e.g., GPT4o) [3], effectively distilling the closed models. 
Doing so make measuring scientific progress difficult and limits research on how to train VLMs ground-up. Molmo [11] proposes a class of open-data models, however, they are image VLMs trained on relatively small-scale data, limiting their performance as our experiments will show." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.76, + 0.827, + 0.911 + ], + "angle": 0, + "content": "VLM Benchmarks. Several benchmarks have been proposed to assess the capabilities of VLMs. Popular image benchmarks cover broad perception and reasoning [37, 38, 39, 40, 41, 42, 43, 44, 19, 45, 46, 47, 48] as well as capabilities like image captioning [49, 50, 51], document/diagram understanding [52, 53, 54, 55, 56, 57, 58, 59, 60, 61], mathematical reasoning [62, 63, 64], visual grounding [65, 66] and hallucination [67, 68]. Popular video benchmarks cover video question answering [20, 8, 69, 70, 71, 72, 73, 74, 75, 76, 77, 22, 78, 79, 80], video captioning [81, 82, 83, 84, 85, 86, 87], and hallucination in videos [88, 89]. Many of these video benchmarks remain image-centric — they have questions that can be answered with a few frames. Video-centric reasoning in benchmarks has been relatively neglected with benchmarks proposed only recently for long video understanding [90, 91, 92, 93, 94, 95, 96, 97, 98] and fine-grained, temporal reasoning [99, 100, 101, 102, 103]. We introduce PLM-VideoBench—a benchmark suite aimed at the core, video" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "centric capabilities that current benchmarks neglect, namely fine-grained activity understanding and spatio-temporally grounded reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.148, + 0.344, + 0.166 + ], + "angle": 0, + "content": "3 PLM: Overview" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.185, + 0.825, + 0.214 + ], + "angle": 0, + "content": "In this section, we overview the model, training stages and training data involved in the development of PLM. Please refer to Fig. 8 for a detailed overview and Appendix A for additional details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.236, + 0.487, + 0.404 + ], + "angle": 0, + "content": "Model. PLM consists of a vision encoder and language decoder, where a pre-trained Perception Encoder (PE) [104] is connected to the Llama 3 [13] language decoder (1B, 3B, or 8B parameters) with a 2-layer MLP projector. We use PE L/14 for Llama3.2 1B and 3B, and PE G/14 for Llama3.1 8B. For image input, PLM incorporates dynamic tiling to support high resolution images for up to 36 tiles of \\(448^{2}\\) resolution, where each tile undergoes \\(2 \\times 2\\) average input, PLM uses 32 frames at \\(448^{2}\\) resolution, v dimensions of each video frame." + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.239, + 0.822, + 0.33 + ], + "angle": 0, + "content": "
Stage 1 WarmupStage 2 MidtrainingStage 3 SFT
ModalityImageImage + VideoImage + Video
Data1M Synthetic72M Mix19M Mix
TrainingProjectileFullFull
Downsampling-2 × 22 × 2
Tiles/Frames1/-16/1636/32
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.496, + 0.337, + 0.826, + 0.363 + ], + "angle": 0, + "content": "Table 1: Summary of three training stages to train PLM. See Appendix Table 7 and Table 8 for data splits." + }, + { + "type": "table_footnote", + "bbox": [ + 0.496, + 0.362, + 0.825, + 0.402 + ], + "angle": 0, + "content": "pooling to compress the visual tokens. For video where the same pooling is applied across the spatial" + }, + { + "type": "list", + "bbox": [ + 0.496, + 0.337, + 0.826, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.426, + 0.827, + 0.496 + ], + "angle": 0, + "content": "Data. The data used to train the PLM consists of synthetic and human-annotated samples. Synthetic data enhances the general capabilities of PLM, while human-annotated data broadens these capabilities to encompass more complex tasks. Synthetic data is sourced from a diverse array of image and video datasets, covering fundamental VLM capabilities such as OCR, chart/document/diagram understanding, image/video captioning, and visual question answering." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.826, + 0.587 + ], + "angle": 0, + "content": "We design data engines for each data modality (e.g., natural images, charts, documents, figures, egocentric and exocentric videos) to efficiently scale up, creating \\(\\sim 66.1\\mathrm{M}\\) samples (\\(\\S 4\\)). The synthetic data can be noisy, but is available at large scale; on the other hand, human-annotated data provides rich, high-quality supervision for image and video tasks. Here, we combine existing human annotations of diverse image and video sources, with our own collected human-annotated data, specifically geared towards fine-grained video understanding and spatio-temporally grounded reasoning (\\(\\S 5\\))." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.608, + 0.48, + 0.624 + ], + "angle": 0, + "content": "Training stages. 
PLM trains in three stages:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.825, + 0.686 + ], + "angle": 0, + "content": "1. **Projector warm-up.** First, we freeze the vision encoder and LLM and only train the vision projector on a small amount of synthetic image data. This warms-up the newly initialized parameters in the projector and improves stability for later stages. We use \\( 1M \\) images from SA-1B [105] with the image captions generated by our data engine (§4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.691, + 0.551, + 0.789 + ], + "angle": 0, + "content": "2. Large-scale midtraining with synthetic data. Next, we train PLM on diverse domains of images and videos at scale, using a maximum of 16 tiles for images and 16 frames for videos. PLM sees around 64.7M images and videos with synthetically generated captions and question-answer pairs. We employ our data engine to scale up synthetic data generation (see §4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.795, + 0.552, + 0.878 + ], + "angle": 0, + "content": "3. Supervised fine-tuning with human-annotated data. Finally, we train PLM with higher image resolutions and more video frames, using up to 36 tiles for images and 32 frames for videos. In this stage, we tackle more challenging video tasks, including fine-grained QA and spatiotemporally grounded reasoning." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.629, + 0.825, + 0.878 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.564, + 0.694, + 0.822, + 0.835 + ], + "angle": 0, + "content": "
SamplesTypeStage
Our Human-annotated (2.87M)
PLM-FGQA2.4MFine-grained3
PLM-STC476.2KR(D)Cap + RTL3
Our Synthetic (66.1M)
Natural Images15.9MCaption1,2,3
Charts & Documents31.9MCaption2,3
Videos Mix17.5MMix.2,3
Ego4D880KCap. + QA2,3
Existing Open Source (6.52M)
Image (92 datasets)5.6MDiverse2,3
Video (27 datasets)920KDiverse2,3
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.56, + 0.841, + 0.826, + 0.868 + ], + "angle": 0, + "content": "Table 2: Summary of the data mix for training PLM. See Table 9 for the full data blend." + }, + { + "type": "table_footnote", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Table 1 shows an overview of our training setup for each stage. Appendix A.1 provides the complete training recipe for each stage, including hyperparameters and data sources." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.533, + 0.108 + ], + "angle": 0, + "content": "4 Synthetic Data Generation and Scaling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.827, + 0.233 + ], + "angle": 0, + "content": "The predominant paradigm for VLM training is to generate synthetic annotations as cheap alternatives to human-labeled data [1, 106, 30, 107, 10, 11, 15]. Although seemingly promising to get the best results on benchmarks, the majority of such data shared in the community is derived from proprietary models. This trend makes it hard to decouple scientific progress from proprietary distillation impact. In this section, we explore the efficacy of the current paradigm for VLM training in a transparent manner. We design our data engine entirely from open-source models and scale the synthetic data generation to around 66.1M samples of images and videos. We establish the scaling laws of training from synthetic data on standard VLM tasks, including image, OCR/document, and video tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.248, + 0.303, + 0.263 + ], + "angle": 0, + "content": "4.1 Data Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.828, + 0.289 + ], + "angle": 0, + "content": "Our data engine is designed to target base capabilities of VLMs for image and video understanding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.302, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Image Data Engine. We generate short and long captions, as well as question-answer pairs, for natural images and those containing documents, diagrams, and text recognizable by optical character recognition (OCR). We prompt openly accessible Llama 3 [13] model to produce factual, detailed image captions while minimizing hallucinations. To create informative question-answer pairs, we utilize OCR data, captions, and other metadata, which are fed into the prompt of a text-only LLM." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.386, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Video Data Engine. For videos, we first use an off-the-shelf scene detector [108] to extract video clips of approximately 30 seconds duration. Then, we extract the keyframes and generate frame-level captions using Llama 3, and video captions using our initial PLM trained with Stage 1 and Stage 3 data as shown in Table 2. We then employ an LLM to refine the frame-level and video captions by incorporating existing video metadata (e.g., action labels, time tags) into a cohesive, detailed video-level caption. Similarly, we generate question-answer pairs from the video-level captions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.827, + 0.518 + ], + "angle": 0, + "content": "The resulting synthetic data is large-scale and diverse – 66.1M samples carefully curated from a variety of image and video sources including natural images, in-the-wild text, chart, figures, documents, egocentric and exocentric videos. Additional details are in Appendix J." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.534, + 0.449, + 0.548 + ], + "angle": 0, + "content": "4.2 Scaling Laws with Synthetic Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.827, + 0.575 + ], + "angle": 0, + "content": "We examine scaling properties of our synthetic data under controlled setup and establish scaling laws." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.587, + 0.819, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.741, + 0.828, + 0.807 + ], + "angle": 0, + "content": "Figure 2: Synthetic Scaling Plots. Relationship between Average Error across benchmarks and training compute (in floating-point operations) for various PLM models. We report average errors across Video QA tasks [75, 72, 90, 8, 70, 71], OCR QA tasks [109, 53, 56, 57], and Natural Images tasks [45, 110, 111, 68, 40, 112]. Model's performance using only human-labeled data subset are reported (No Syst.) as well as the actual power-law fit of each subcategory." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Setup. To establish power-law relationship between compute and validation-set errors of downstream benchmarks, we vary the scale of synthetic data, language model decoders (1B, 3B, and 8B), vision encoders (300M and 2B), and resolution/number of frames. For each configuration, we train a model with the 66.1M synthetic data from our data engine and 6.5M publicly available human-labeled data, following stage 2 training described in §3. 
At every 2M samples, we evaluate PLM on three categories of downstream benchmarks (VideoQA, OCR QA, Natural QA), constructed from 20 vision-language understanding benchmarks that provide a comprehensive and general evaluation of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "multi-modal large language models. We compute the pareto frontier of these data points and fit a power law relationship: \\(\\mathrm{Err.} = (\\beta \\times \\mathrm{FLOP})^{\\alpha}\\) and compare the exponents \\(\\alpha\\) of the power function as scalability of each setup, where a smaller \\(\\alpha\\) implies better scaling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.15, + 0.827, + 0.318 + ], + "angle": 0, + "content": "Scaling with decoder size. Fig. 2 shows the scaling behavior of PLM across various LLM sizes. We show validation-set errors and training compute on a logarithmic scale, with the black linear line representing the power-law relationship between them. Different colors (green, turquoise, and blue) represent different language model scales (1B, 3B, 8B) while keeping the vision encoder size constant at 300M. As described in the setup section above, we show the power law fit of the pareto frontier in each benchmark category. We also show the results of PLM only trained on 4M human-labeled datasets as baselines, denoted with horizontal lines of each color. The gap from the horizontal line to the data point marks the impact of the synthetic data. Interestingly, all three categories of benchmarks demonstrate clear power-law relationship between compute and average benchmark errors, with the power law exponent \\((\\alpha)\\) of \\(-0.15, -0.20,\\) and \\(-0.11\\) for Video QA, OCR QA, and Natural Image QA, respectively. 
In Appendix B, we provide more details and extend the analysis to (1) scaling the encoder size, and (2) scaling the image resolution and video frames." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.333, + 0.553, + 0.527 + ], + "angle": 0, + "content": "Limitation of synthetic data. In Fig. 3, we evaluate stage 2 on an extended set of video benchmarks. Specifically, we show the result of 7 challenging video tasks on fine-grained activity understanding [97, 100, 89, 101, 99], temporal grounding [113] and long-video reasoning [92]. Unlike generic, high-level understanding (e.g., \"what is happening in this video\"), the \"challenging\" tasks require a thorough understanding of video in space and time, and fine-grained semantic details. As shown, the challenging video tasks (\"HardQA\" in lavender, plum, magenta) show a poor scaling trend \\((-0.03)\\) compared to general video QA \\((-0.15)\\). The stark difference between the two power law fits shows that scaling synthetic data is only effective for established, base tasks. Extending VLMs to" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.825, + 0.555 + ], + "angle": 0, + "content": "these more challenging, complex tasks still remain unsolved. Next, we address this challenge with high-quality human-annotated video data, PLM-FGQA and PLM-STC." + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.336, + 0.825, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.474, + 0.827, + 0.526 + ], + "angle": 0, + "content": "Figure 3: Limitation of synthetic data. Challenging video tasks (HardQA [97, 100, 89, 101, 99, 113, 92]) do not scale well with synthetic data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.576, + 0.519, + 0.594 + ], + "angle": 0, + "content": "5 Human-annotated High Quality Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.825, + 0.72 + ], + "angle": 0, + "content": "As shown in Fig. 
3, the current paradigm with synthetic data has run out of steam. Training from tens of millions of synthetically annotated data hardly improves our model on new, challenging video benchmarks. Beyond standard VLM tasks, these benchmarks focus on advanced capabilities such as fine-grained activity understanding, temporal grounding, and long video understanding. Perhaps, the knowledge that these benchmarks examine is simply not present in the initial training set of our data engine nor in existing human-annotated data. Our community lacks high quality datasets for detailed visual understanding to start from, that covers what, where, when, and how of activities in video. To address this gap, we introduce two large-scale, human-annotated video datasets:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.725, + 0.827, + 0.852 + ], + "angle": 0, + "content": "PLM-FGQA is a fine-grained video QA dataset collected by asking human annotators to watch a short video segment and answer model-generated questions which focus on \"what\" activities humans perform and \"how\" they perform these activities. Question types include fine-grained recognition (action and object), fine-grained temporal perception (direction of movements, repetition counts, hand pose etc.), and fine-grained spatial understanding (object locations and spatial relationships). We use a multi-stage data engine to first extract video segments with salient actions from untrimmed videos through temporal clustering and shot-detection. Next, we generate questions and answers using either a text-only LLM or an early version of PLM. Finally, we refine the answers by asking humans to verify or replace them if they are incorrect, resulting in a high-quality QA pairs." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Overall, we collect 2.4M question answer pairs from various open-access video datasets [114, 115, 116, 117, 118, 83] spanning over 780k unique video clips from diverse domains (e.g., cooking, DIY, carpentry, automotive and bike repair) and viewpoints (egocentric and third-person); refer to Fig. 13 for domain statistics. This is nearly 8 times larger than the size of the largest existing human-annotated" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.093, + 0.452, + 0.104 + ], + "angle": 0, + "content": "Fine-grained QA (FGQA)" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.108, + 0.387, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.17, + 0.226, + 0.177 + ], + "angle": 0, + "content": "Question" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.178, + 0.34, + 0.186 + ], + "angle": 0, + "content": "How does the person hold the sandpaper?" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.187, + 0.378, + 0.204 + ], + "angle": 0, + "content": "Answer: With their right hand, between the right thumb on one side, fingers on the other side." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.178, + 0.378, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.21, + 0.226, + 0.218 + ], + "angle": 0, + "content": "Question" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.218, + 0.382, + 0.23 + ], + "angle": 0, + "content": "In which direction is the person moving the sandpaper? Answer" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.231, + 0.38, + 0.244 + ], + "angle": 0, + "content": "From the bottom of the baluster to the top in a vertical, oscillating motion." 
+ }, + { + "type": "image", + "bbox": [ + 0.397, + 0.109, + 0.466, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.467, + 0.109, + 0.534, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.109, + 0.604, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.402, + 0.17, + 0.442, + 0.177 + ], + "angle": 0, + "content": "Question" + }, + { + "type": "text", + "bbox": [ + 0.402, + 0.178, + 0.565, + 0.186 + ], + "angle": 0, + "content": "How many chakli snacks does the person flip?" + }, + { + "type": "text", + "bbox": [ + 0.402, + 0.187, + 0.591, + 0.203 + ], + "angle": 0, + "content": "Answer\nThe person flips three chakki snacks with a long metal skewer." + }, + { + "type": "list", + "bbox": [ + 0.402, + 0.178, + 0.591, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.402, + 0.21, + 0.442, + 0.217 + ], + "angle": 0, + "content": "Question" + }, + { + "type": "text", + "bbox": [ + 0.402, + 0.218, + 0.59, + 0.23 + ], + "angle": 0, + "content": "Where is the metal skewer located at the beginning? Answer" + }, + { + "type": "text", + "bbox": [ + 0.402, + 0.231, + 0.589, + 0.244 + ], + "angle": 0, + "content": "Resting on top of the pan, which is positioned on the left burner of the portable stove." + }, + { + "type": "list", + "bbox": [ + 0.402, + 0.218, + 0.59, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.109, + 0.822, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.259, + 0.827, + 0.329 + ], + "angle": 0, + "content": "Figure 4: Overview PLM-FGQA. Examples of question-answer pairs from PLM-FGQA, focusing on fine-grained human activity understanding. 
PLM-FGQA is approximately 8 times larger than the largest existing human-annotated video QA dataset and addresses a wide range of fine-grained question types that are scarce in existing video QA datasets, such as ones that cover direction of movement, object states, locations and spatial relations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.828, + 0.415 + ], + "angle": 0, + "content": "video QA dataset in the community [91]. Moreover, as illustrated by the breakdown of question types1 in Fig. 4 (top-right), PLM-FGQA contains a large number of annotations about fine-grained details that have been largely missing in existing training video QA datasets [119, 69, 71, 76, 20, 120, 121, 122, 123]. Please refer to Table 16 for comparison with existing datasets Table 17 for dataset examples and Appendix G for further details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.421, + 0.825, + 0.545 + ], + "angle": 0, + "content": "PLM-STC is a spatio-temporal video captioning dataset that offers detailed activity descriptions for each video. It includes timestamps (\"when\") of each activity and focuses on specific subjects identified by a masklet (\"where\"). We employ a two-stage annotation process to improve efficiency in collecting PLM-STC. In the first stage, annotators select interesting objects that exhibit significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. For segments where the subject is out of frame, we automatically supplement \"out of frame\" caption. In the second stage, a separate set of annotators write temporally localized descriptions of the highlighted subject focusing on the changes in action across time in relation to the whole video." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.42, + 0.563, + 0.578, + 0.572 + ], + "angle": 0, + "content": "Spatio-temporal Captions (STC)" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.576, + 0.387, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.646, + 0.266, + 0.654 + ], + "angle": 0, + "content": "[0,11] Out of frame." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.673, + 0.382, + 0.697 + ], + "angle": 0, + "content": "[12, 67] The person wearing a jacket is running on a snow covered ground. She stops and turns to look the other person." + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.576, + 0.603, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.403, + 0.646, + 0.597, + 0.668 + ], + "angle": 0, + "content": "[0, 19] The man moves gracefully, using his hand gestures that closely resemble a dance in most of his actions." + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.672, + 0.544, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.404, + 0.684, + 0.58, + 0.693 + ], + "angle": 0, + "content": "[20, 31] The person moves from right to left." 
+ }, + { + "type": "image", + "bbox": [ + 0.614, + 0.576, + 0.819, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.636, + 0.638, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.636, + 0.657, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.636, + 0.678, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.636, + 0.69, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.694, + 0.636, + 0.704, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.636, + 0.72, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.724, + 0.636, + 0.734, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.636, + 0.75, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.636, + 0.775, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.636, + 0.798, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.619, + 0.646, + 0.813, + 0.661 + ], + "angle": 0, + "content": "[0, 81] A little girl moves back as a beluga whale approaches her face." 
+ }, + { + "type": "image", + "bbox": [ + 0.621, + 0.674, + 0.639, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.674, + 0.657, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.674, + 0.682, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.674, + 0.709, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.674, + 0.734, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.674, + 0.75, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.674, + 0.775, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.674, + 0.798, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.62, + 0.697, + 0.639, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.697, + 0.657, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.697, + 0.679, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.697, + 0.69, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.694, + 0.697, + 0.705, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.697, + 0.72, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.724, + 0.697, + 0.734, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.697, + 0.75, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.765, + 0.697, + 0.775, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.789, + 0.697, + 0.798, + 0.704 + ], + "angle": 0, + "content": 
null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.724, + 0.827, + 0.781 + ], + "angle": 0, + "content": "Figure 5: Overview of PLM-STC. Examples of spatio-temporally grounded captions from PLM-STC, the first dataset to associate each caption both with a temporal interval as well as a high-fps sequence of segmentation masks of the subject - i.e., masklets (compared to just a temporal interval or a sparse sequence of bounding boxes)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.825, + 0.884 + ], + "angle": 0, + "content": "Overall, we collect 194.2K spatio-temporal captions as the first existing large-scale dense video-region captioning dataset. We convert these spatio-temporal captions into three tasks for training: RCap (194.2K): Given the video region and timestamps, the model generates a caption; RTLoc (194.2K): Given the video region and caption, the model localizes the action; and RDCap (122.3K): Given the video region, the model generates dense, localized captions. In total, we construct \\(194.2\\mathrm{K} + 194.2\\mathrm{K}\\) \\(+122.3\\mathrm{K} = 522.7\\mathrm{K}\\) samples, of which \\(476.2\\mathrm{K}\\) are used for training and the rest for constructing" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.898, + 0.407, + 0.912 + ], + "angle": 0, + "content": "1 obtained with LLM-based tagging." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "PLM-VideoBench. Please refer to Fig. 5 for dataset examples, Table 19 for comparison with existing datasets, Table 20 for dataset statistics and Appendix H for further details." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.135, + 0.344, + 0.15 + ], + "angle": 0, + "content": "5.1 PLM-VideoBench" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.161, + 0.828, + 0.218 + ], + "angle": 0, + "content": "Our high-quality human-annotated data offers VLMs to train for broader range of capabilities for holistic video understanding. However, existing video benchmarks are not adequately equipped to evaluate these. To this end, we introduce PLM-VideoBench, a novel benchmark focusing on specific activities (what) and their execution details (how) within spatio-temporal contexts (where and when)." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.226, + 0.825, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.396, + 0.825, + 0.437 + ], + "angle": 0, + "content": "Figure 6: PLM-Video Dataset includes fine-grained video QA (FGQA), open-ended QA in videos recorded using smart glasses (SGQA), Spatio-Temporal Captions (STC) post-processed into video region captioning (RCap), video region temporal localization (RTLoc) and video region dense captioning (RDCap) tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.455, + 0.827, + 0.54 + ], + "angle": 0, + "content": "Fine-Grained Question Answering (FGQA). In this task, a model must answer a multiple-choice question (MCQ) that probes nuanced, fine-grained activity understanding (e.g., painting \"vertically\" vs. \"horizontally\" in Fig. 6, first). We report multi-binary accuracy (MBAcc) [99] where each question is split into multiple binary choice questions. Our test set consists of 4,371 question-answer pairs. For more information, including statistics on video clips, segment duration, question types, and benchmark construction, see Table 18 and §G.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.828, + 0.664 + ], + "angle": 0, + "content": "Smart Glasses Question Answering (SGQA). 
In this task, a model must answer open-ended questions about activities and objects visible in an egocentric video stream recorded by a smart-glasses device (see Fig. 6, second). The questions are designed to simulate real-world scenarios where a user would ask for assistance from their smart glasses. We manually collect the videos using commercially available smart glasses, providing a completely new, unique dataset that reflects modern use-cases such as online AI video assistance and activity coaching. For evaluation, we use LLM-judge accuracy with an open-access model (Llama3.3 70B). The test set consists of 665 human-annotated question-answer pairs. See Appendix I for more details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.826, + 0.777 + ], + "angle": 0, + "content": "Video Region Captioning (RCap). In this task, a model must generate a detailed description of an event involving a subject of interest in the video. Given a region masklet and a specified time interval, the model is required to output a caption that accurately describes the event occurring within that interval. Compared to traditional video captioning [125, 83, 84] where the aim is to generate a video-level caption, the goal is to generate a region-level caption tied to a specific subject (e.g., a person, object or animal) (see Fig. 6, third). The test set contains 10,060 human-annotated instances and we report LLM-judge accuracy with Llama3.3 70B. See Appendix C.3 for details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Region Temporal Localization (RTLoc). In this task, a model must identify the precise time interval within the video when the specified event takes place for the given subject. Given a video, a region masklet and a text description of the event, the model is required to output the start and end timestamps that correspond to the occurrence of the event (see Fig. 6 fourth). 
Notably, this task is the inverse of RCap — instead of generating the caption, the model receives it as input and generates the corresponding time interval. We filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap. We report average recall@1 over IoU thresholds (0.3, 0.5, 0.7, 0.9). See Appendix C.3 for details." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.204 + ], + "angle": 0, + "content": "Region Dense Video Captioning (RDCap). In this task, a model must generate a detailed description of all events involving a specific subject of interest (e.g., person, animal, or object) in a video. Given a video and a region masklet, the model must produce a sequence of (start, end, caption) tuples that cover the entire duration of the video, including periods when the subject is not visible (see Fig. 6, last). This task is a composition of RTLoc and RCap, requiring the model to produce both temporal windows for events as well as captions directly from the video. The test set contains 2,620 samples and we report the SODA score [126] which uses an LLM judge. See Appendix C.3 for details." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.222, + 0.314, + 0.239 + ], + "angle": 0, + "content": "6 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.825, + 0.294 + ], + "angle": 0, + "content": "We first overview the baselines and evaluation setting (§6.1). We then compare benchmark results of PLMs with the baselines on a broad collection of image (§6.2) and video (§6.3) tasks as well as on our PLM-VideoBench (§6.4). Finally, we provide analyses on data and model ablations (§6.5)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.303, + 0.254, + 0.318 + ], + "angle": 0, + "content": "6.1 Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.328, + 0.606, + 0.343 + ], + "angle": 0, + "content": "We compare PLMs against the following two classes of baselines:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.346, + 0.825, + 0.375 + ], + "angle": 0, + "content": "- Proprietary models such as GPT-4o [33] (gpt-4o-2024-11-20), Gemini-Pro 1.5 [34] and Gemini-Flash 2.0 [35]. We use API calls to evaluate these models." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.377, + 0.825, + 0.418 + ], + "angle": 0, + "content": "- Open-access models such as Molmo-O [11], LLaVA-OneVision [28], Qwen2.5-VL [106] and InternVL2.5 [10] — state-of-the-art open-access models, for which model scale, architecture and inference code are available. We use the official inference code for all models." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.346, + 0.825, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.825, + 0.501 + ], + "angle": 0, + "content": "Inference protocol. For mask inputs in PLM-VideoBench, we overlay a colored box on the video frames to specify the regions. We report validation set performance unless specified (in brackets) under the benchmark name. Metrics marked with \\(\\dagger\\) use LLM as a judge. Complete implementation details including inference hyper-parameters, task prompts, judge prompts and proprietary model evaluation protocol can be found in Appendix C.4." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.509, + 0.398, + 0.524 + ], + "angle": 0, + "content": "6.2 Image Benchmark Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.528, + 0.827, + 0.598 + ], + "angle": 0, + "content": "We evaluate PLM on a total of 20 image benchmarks. 
Charts, Diagrams and Documents: answer questions that require parsing images of documents and diagrams; Image Captioning: generate a short/detailed caption, Perception and Reasoning: answer questions of varying difficulty about objects, actions, functional correspondence, multi-view reasoning, spatial layout etc. and Hallucination: evaluate robustness to hallucinated details. More details are in Appendix C.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Table 3 shows our results. Overall, PLM shows strong performance on a wide spectrum of image benchmarks with solely from open-access data with a white-box data engine. Additionally, we report" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.651, + 0.825, + 0.896 + ], + "angle": 0, + "content": "
ModelCharts, Diagrams and DocumentsPerception and ReasoningHard PerceptionHalluc.
DocVQA (test) acc [53]CharQA acc [54]TextVQA acc [52]InfoQA (test) acc [56]AL2D (n/o mask) acc [55]OCR-Bench acc [57]MMMU (rval) acc [37]VQA2 (rval) acc [111]OK-VQA acc [39]VizWiz acc [40]SEED (image) acc [58]BLINK (multi-image) acc [44]CV-Bench acc [19]RealWorldQA acc [45]VSR acc [127]POPE acc [68]
GPT-4o [33]92.8*85.7*75.380.7*94.2*81070.7*-63.9-77.1*68.0*72.573.978.087.2*
Gemini 1.5 Pro [35]94.084.274.881.0*95.783063.2-63.9-77.859.881.066.376.188.2*
Gemini 2.0 Flash [35]93.084.880.281.094.079269.9*-57.8-77.064.482.371.974.8-
1B scale
Qwen2VL-2B [30]90.1*75.380.365.5*84.6*809*41.1*80.059.767.472.944.4*17.362.6*73.087.2
InternVL2.5-1B [10]84.8*75.9*72.0*56.0*77.8*785*40.9*72.251.547.471.342.442.158.365.490.2
PLM-1B90.778.682.163.084.980734.881.761.059.776.346.873.867.168.888.4
3B scale
Qwen2.5 VL-3B [106]93.9*83.179.3*77.1*90.2797*53.1*80.863.271.973.147.6*54.465.4*78.588.2
InternVL2.5-4B [10]91.6*84.0*79.372.1*90.5*828*52.3*80.964.061.875.650.8*55.964.680.091.0
PLM-3B93.884.384.374.690.983041.284.366.864.078.555.481.472.480.488.7
8B scale
Molmo-7B-O [11]90.8*80.4*80.4*70.0*90.7*-39.3*85.3*-----67.5*--
LLaVA-OV-7B [28]86.780.077.368.890.165648.983.569.663.476.449.475.066.778.189.2
Qwen2.5VL-7B [106]95.7*87.3*84.9*82.6*93.0864*58.6*70.161.073.573.256.4*11.969.880.387.2
InternVL2.5-8B [10]93.0*84.8*79.377.6*92.8*82356.0*80.669.264.377.654.8*53.970.1*80.090.6*
PLM-8B94.685.586.580.992.787046.185.669.667.079.356.081.375.082.889.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.901, + 0.825, + 0.928 + ], + "angle": 0, + "content": "Table 3: Image benchmarks. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature, and the remaining are reproduced using official code." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.179, + 0.082, + 0.825, + 0.326 + ], + "angle": 0, + "content": "
ModelVCap.Video QAFine-grained Video QAT.Loc.Halluc.
DREAM-1K F/F [86]MVBench acc [70]NEX-TQA acc [69]PerceptionTest (test) acc [71]STAR acc [72]Video-MME acc [75]ActivityNet-QA acc [76]EgoSchemas (test) acc [90]TemporalBench MBA acc [99]TOMATO MBO acc [100]MotionBench (dev) acc [101]TempCompass (MCC) acc [102]CG-Bench (clue) acc [97]Charades-STA mOU [113]VideoHallucer overall acc [88]EventHallusion (binary) acc [89]
Proprietary
GPT-4o [33]-64.6*79.1-70.471.9*-72.2*38.5*37.7*55.974.558.3*38.656.491.9*
Gemini 1.5 Pro [35]-60.5*81.665.9-75.0*56.7*71.2*34.732.056.175.650.1*34.256.080.9
Gemini 2.0 Flash [35]-60.781.9--70.3*-71.5*27.632.856.176.947.0*29.860.181.6
1B scale
Qwen2VL-2B [30]26.863.2*76.453.9*67.355.6*38.427.013.125.746.962.342.80.334.959.9
InternVL2.5-1B [10]27.764.874.359.473.050.3*60.755.727.725.045.056.440.90.831.038.9
PLM-1B34.370.180.372.783.749.262.560.418.225.552.264.643.655.249.279.5
3B scale
Qwen2.5 VL-3B [106]20.367.076.866.9*63.061.5*59.264.8*17.223.549.263.045.738.8*45.253.5
InternVL2.5-4B [10]29.271.782.567.977.262.3*64.166.623.727.452.765.252.08.449.666.3
PLM-3B37.474.783.479.384.854.966.266.923.430.960.469.347.257.755.576.5
8B scale
LLaVA-OV-7B [28]28.057.181.058.166.057.760.545.419.527.653.767.841.212.134.761.1
Qwen2.5VL-7B [106]23.369.6*80.070.5*68.165.5*63.765.0*24.524.651.171.7*49.843.6*50.161.1
InternVL2.5-8B [10]28.572.685.568.9*77.664.2*66.166.2*24.329.453.568.3*53.114.357.160.2
PLM-8B35.977.184.182.784.958.367.368.828.333.261.472.746.458.657.777.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.332, + 0.825, + 0.359 + ], + "angle": 0, + "content": "Table 4: Video benchmark results. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature and the remaining are reproduced using official code." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.369, + 0.825, + 0.397 + ], + "angle": 0, + "content": "Image Grounding task results on RefCOCO/+/g [65] datasets in Appendix Table 14, and show that PLM outperforms both specialist models as well as the VLM baselines in all model scales." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.41, + 0.395, + 0.423 + ], + "angle": 0, + "content": "6.3 Video Benchmark Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.527 + ], + "angle": 0, + "content": "We evaluate PLM on a total of 25 video benchmarks. We divide these into the following categories. Video Captioning: generate a short caption for a video, or a dense description of all events; Short video QA: answer a question about a short video (few seconds to a minute), either by selecting from a list of options, or providing a free-form answer; Long video QA: answer a question as before, about a much longer video (minutes to hours); Fine-grained QA: answer detailed questions about spatial location, motion, temporal information etc.; and Hallucination: evaluate the robustness of video models to hallucinated details about objects and events." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.534, + 0.826, + 0.577 + ], + "angle": 0, + "content": "Table 4 shows video captioning, video QA, fine-grained video QA, and video hallucination results. We achieve strong results on widely adopted benchmarks, despite only using open-access data mix free from proprietary model artifacts, outperforming both the open-access and proprietary models." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.582, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Further, we achieve competitive performance on the majority of challenging benchmarks, such as EgoSchema (68.8 %), MotionBench (61.4 %), TOMATO (33.2 %), TempCompass (72.7 %), TemporalBench (28.3 &), Charades-STA (58.6 %), and more. All our model scales show strong performance against both proprietary models as well as open-access baselines of same scale." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.825, + 0.686 + ], + "angle": 0, + "content": "Lastly, we also show that PLMs at all scale greatly outperform existing approaches on captioning tasks and hallucination detection tasks, owing to our focus on detailed, fine-grained spatio-temporal annotations in our human-annotated data collection." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.699, + 0.395, + 0.712 + ], + "angle": 0, + "content": "6.4 PLM-VideoBench Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.502, + 0.913 + ], + "angle": 0, + "content": "We report the result on our proposed benchmark PLM-VideoBench from §5.1 in Table 5. We evaluate our PLM as well as (proprietary and open-access) baselines. In addition, we provide human performance of each subtask in the first row. The results show a significant gap between the baselines and PLM. Proprietary baselines and open-source baselines alike perform reasonably on FGQA tasks, though still 6.5 points lower than PLM (61.2 vs 67.7). On SGQA, where the video sources and the question-answer pairs are unseen to all models, PLM performs reasonably well, yet 2.1 points short from open-access best (InternVL2.5) and far from the best proprietary model" + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.698, + 0.822, + 0.869 + ], + "angle": 0, + "content": "
ModelFGQA MBAccSGQAacc†RDCap SODA†RCap score†RTLoc meanRAvg.
Human perf.90.967.966.653.967.873.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
Open-access
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.511, + 0.875, + 0.825, + 0.914 + ], + "angle": 0, + "content": "Table 5: PLM-VideoBench results. We evaluate PLM against baselines and report breakdowns. We report human performance in the first row." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "(GPT-4o). On spatio-temporal tasks (RDCap, DCap, RTLoc), open source baselines are unable to perform grounded reasoning and default to repeating the same caption for every time interval. Proprietary models perform reasonably well, yet far from the human performance. In all sub-tasks of PLM-VideoBench, PLM shows competitive performance compared to proprietary and open-access baselines. Results for all model scales are in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.168, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Note that the human performance varies based on the nature of the task and evaluation metrics. For example, FGQA human scores are naturally higher than RCap because the task is structured (select the correct option vs. open-ended) and the metric is objective (accuracy vs. LLM-judge accuracy)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.226, + 0.33, + 0.24 + ], + "angle": 0, + "content": "6.5 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.251, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Setup. We perform an ablation study to assess the importance of each of our proposed data, both synthetic and human-annotated. We start with PLM 3B after stage 2 training, and finetune on 4M short image and video SFT data mix \\( {}^{2} \\) for the data ablation. 
We evaluate and report average video benchmark performance across five categories — video captioning, short video QA, fine-grained QA, and video hallucination, as well as spatial and temporal tasks, PLM-VideoBench and three image categories — image OCR, image captioning, and image perception. Full details are in Appendix A.3." + }, + { + "type": "table", + "bbox": [ + 0.186, + 0.349, + 0.649, + 0.472 + ], + "angle": 0, + "content": "
PLM-Synth.PLM-STCPLM-FGQATotal AveragePLM-VideoBenchVideo TasksImage Tasks
PLM-FGQAMBaccPLM-SGQAacc†3 metric avg.Fine-Grained QA5 benchmark avg.Video Cap.Dream 1KVideo QA5 benchmark avg.Video Hallu.2 benchmark avg.Spatial&Temp.4 benchmark avg.Image OCR6 benchmark avg.Image Cap.3 benchmark avg.Image Rec.5 benchmark avg.
XXX48.539.734.46.642.224.067.564.950.676.064.363.3
XX54.349.835.914.748.829.973.273.356.184.065.965.5
X57.949.936.242.148.632.373.974.262.983.867.565.0
X56.762.943.215.250.130.474.176.358.383.764.065.6
61.263.644.042.250.234.374.676.364.383.774.265.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.474, + 0.665, + 0.526 + ], + "angle": 0, + "content": "Table 6: Ablation. We show the impact of individual data components in PLM training. For this ablation, we use a reduced SFT datamix consisting of 4M open-access image and video data. Results are aggregated validation-set performance over selected benchmarks in each category of tasks, details in Appendix A.3." + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.366, + 0.807, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.499, + 0.826, + 0.526 + ], + "angle": 0, + "content": "Figure 7: HardQA improves with PLM data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.549, + 0.827, + 0.674 + ], + "angle": 0, + "content": "Discussion. First, we observe that stage 2 synthetic data training boosts model performance across the board. Moreover, adding our PLM-STC data further improves a variety of benchmarks, including PLM-STC (+27.4 points), video captioning (+2.4 points), and most importantly, spatial and temporal tasks (+6.8 points). Adding our PLM-FGQA data improves a distinct set of categories for fine-grained activity understanding; PLM-FGQA (+13.1 points), PLM-SGQA (+7.3 points), Fine-grained video tasks (+1.3 points), video hallucination tasks (+3.0 points), and spatial and temporal tasks (+2.2 points). Using our human-annotated data altogether results in the best performance overall. Further in Fig.7, we show that our human-annotated data improves upon HardQA [97, 100, 89, 101, 99, 113, 92], effectively addressing the limitations of synthetic data discussed in §4.2."
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.303, + 0.708 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.724, + 0.827, + 0.835 + ], + "angle": 0, + "content": "This work presents Perception Language Model (PLM), a fully-reproducible vision-language model to transparently tackle visual perception tasks without distillation of private black-box models. We trained PLM using data from existing open-access datasets and synthetic samples generated by our data engine. We identified gaps in detailed video understanding capabilities that cannot be filled with synthetic data. In response, we collected 2.8M human-labels for fine-grained video question answering and spatio-temporally grounded captioning, and created a new benchmark, PLM-VideoBench, to evaluate these capabilities. We hope our open dataset, benchmark, and models will foster transparent research in visual perception." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.819, + 0.912 + ], + "angle": 0, + "content": "\\( {}^{2} \\) 3.8M datamix: TextQA 500K, Image QA 2.8M, and Video QA 500K. Each detail can be found in Tab. 9."
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.427, + 0.088, + 0.574, + 0.119 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.129, + 0.356, + 0.148 + ], + "angle": 0, + "content": "Table of Contents" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.154, + 0.788, + 0.17 + ], + "angle": 0, + "content": "A PLM Training Details 12" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.171, + 0.786, + 0.185 + ], + "angle": 0, + "content": "A.1 PLM Training Setting 12" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.186, + 0.786, + 0.2 + ], + "angle": 0, + "content": "A.2 PLM Training Datamix 13" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.201, + 0.786, + 0.215 + ], + "angle": 0, + "content": "A.3 Ablation Experiment Details 14" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.171, + 0.786, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.226, + 0.786, + 0.242 + ], + "angle": 0, + "content": "B Synthetic Scaling Experiments 14" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.253, + 0.786, + 0.267 + ], + "angle": 0, + "content": "C VLM Benchmark Details 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.269, + 0.786, + 0.282 + ], + "angle": 0, + "content": "C.1 Image Benchmarks 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.284, + 0.786, + 0.298 + ], + "angle": 0, + "content": "C.2 Video Benchmarks 17" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.299, + 0.786, + 0.312 + ], + "angle": 0, + "content": "C.3 PLM-VideoBench 17" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.314, + 0.786, + 0.328 + ], + "angle": 0, + "content": "C.4 Evaluation Protocols 18" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.269, + 0.786, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 
0.34, + 0.786, + 0.355 + ], + "angle": 0, + "content": "D Additional PLM-VideoBench Results 19" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.366, + 0.786, + 0.381 + ], + "angle": 0, + "content": "E Baseline Implementation Details 19" + }, + { + "type": "list", + "bbox": [ + 0.212, + 0.34, + 0.786, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.393, + 0.786, + 0.407 + ], + "angle": 0, + "content": "F Additional Results 20" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.409, + 0.786, + 0.422 + ], + "angle": 0, + "content": "F.1 Comparison with LLaMA-3V 20" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.424, + 0.786, + 0.438 + ], + "angle": 0, + "content": "F.2 Image Captioning 20" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.439, + 0.786, + 0.452 + ], + "angle": 0, + "content": "F.3 Image Grounding 21" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.453, + 0.786, + 0.468 + ], + "angle": 0, + "content": "F.4 Long Video Understanding 21" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.409, + 0.786, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.48, + 0.786, + 0.494 + ], + "angle": 0, + "content": "G PLM-FGQA: Fine-grained QA 22" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.496, + 0.786, + 0.509 + ], + "angle": 0, + "content": "G.1 Annotation process: Data Engine 22" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.51, + 0.786, + 0.524 + ], + "angle": 0, + "content": "G.2 FGQA PLM-VideoBench Construction 27" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.496, + 0.786, + 0.524 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.537, + 0.786, + 0.55 + ], + "angle": 0, + "content": "H PLM-STC Details 28" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.552, + 0.786, + 0.565 + ], + "angle": 0, + "content": "H.1 Annotation Process 28" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.567, 
+ 0.786, + 0.58 + ], + "angle": 0, + "content": "H.2 PLM-STC Benchmark 30" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.552, + 0.786, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.593, + 0.786, + 0.607 + ], + "angle": 0, + "content": "I Smart Glasses Data 30" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.609, + 0.786, + 0.622 + ], + "angle": 0, + "content": "I.1 Data collection and annotation 30" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.624, + 0.786, + 0.637 + ], + "angle": 0, + "content": "I.2 SGQA Benchmark 31" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.609, + 0.786, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.65, + 0.786, + 0.664 + ], + "angle": 0, + "content": "J Synthetic Data Engine 31" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.676, + 0.786, + 0.69 + ], + "angle": 0, + "content": "K Qualitative Results 35" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.702, + 0.786, + 0.716 + ], + "angle": 0, + "content": "L Limitations and Future Work 39" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.729, + 0.786, + 0.743 + ], + "angle": 0, + "content": "M Broader Impact 39" + }, + { + "type": "list", + "bbox": [ + 0.212, + 0.65, + 0.786, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.396, + 0.108 + ], + "angle": 0, + "content": "A PLM Training Details" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.128, + 0.825, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.386, + 0.828, + 0.454 + ], + "angle": 0, + "content": "Figure 8: The figure provides an overview of the datasets used in the paper. 
PLM is trained with \\(47.8M\\) synthetic image and \\(18.4M\\) synthetic video, and \\(2.9M\\) human-labeled video samples. Our data enables PLM to perform a variety of tasks, including standard tasks like Image, Multi-image, and Video QA, as well as new video tasks such as Fine-grained QA (FGQA), Region Temporal Localization (RTLoc), Region Captioning (RCap), and Region Detailed Captioning (RDCap)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.825, + 0.509 + ], + "angle": 0, + "content": "In this section, we describe the training details of PLM. In §A.1 we describe exact details of training setting such as hyper-parameters and implementation details. In §A.2 we describe our datamix for both synthetically generated and human-annotated parts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.523, + 0.373, + 0.539 + ], + "angle": 0, + "content": "A.1 PLM Training Setting" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.579 + ], + "angle": 0, + "content": "For all three stages, we use AdamW optimizer [128] with weight decay of 0.05 and use FSDP [129] with FlashAttention2 [130] for overall implementation based on PyTorch [131]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.827, + 0.675 + ], + "angle": 0, + "content": "Stage 1 training. In stage 1, we use a subset of SA-1B [105] paired with detailed captions generated by our data engine (§4.1). We use total 1M samples to train PLM with next token prediction loss, with vision encoder and LLM parameters frozen. This stage is commonly known as warm-up stage. We use learning rate \\(1 \\times 10^{-4}\\) for all model scale with global batch size of 512 and \\(448 \\times 448\\) resolution. We use the Perception Encoder [104] L/14 variant for the 1B and 3B PLM models, and the G/14 variant for the 8B PLM model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.689, + 0.827, + 0.858 + ], + "angle": 0, + "content": "Stage 2 training. 
In Stage 2, we train on a total of 72.5M samples. Of these, 66M consist of images and videos with synthetically generated annotations produced by our data engine. The remaining 6.5M samples are a subset of human-annotated images and videos from open-source datasets, which are included in our final datamix described in §A.2. We train with global batch size of 2048, learning rate of \\(4 \\times 10^{-5}\\), weight decay of 0.05 for the full set of parameters (vision encoder, projector, and LLM). For both image and video input, we use \\(448 \\times 448\\) resolution for each tile/frame, which effectively generate 1024 vision tokens. We apply \\(2 \\times 2\\) spatial average pooling to reduce this to 256. We use dynamic tiling with a thumbnail to support any resolution and aspect ratio, similar to prior work [12], and uniform sampling of video frames after preprocessing the videos to 1 fps. We set the maximum number of tiles/frames to be 16, which results in maximum of \\((16 + 1) \\times 256 = 4352\\) and \\(16 \\times 256 = 4096\\) vision tokens respectively for images and videos. We train the model with a sequence length of 6144 allowing a maximum of 2048 tokens for the text modality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Stage 3 training. In stage 3, we use total of 19.1M high-quality datamix spanning over multiple image, video, and text modalities. We describe this datamix in §A.2. In this stage, we use global batch size of 1024, learning rate of \\(1 \\times 10^{-5}\\) for 8B and \\(4 \\times 10^{-5}\\) for 1B and 3B PLM models. We" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.15 + ], + "angle": 0, + "content": "train the full set of parameters for all scales. 
Similar to stage 2, we adapt dynamic tiling and uniform frame sampling for up to 36 tiles for image and 32 frames for video, with \\(2 \\times 2\\) spatial average pooling, which generates \\((36 + 1) \\times 256 = 9472\\) vision tokens for image and \\(32 \\times 256 = 8192\\) vision tokens for video. For all modalities, we use 11264 maximum training sequence length." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.18, + 0.383, + 0.196 + ], + "angle": 0, + "content": "A.2 PLM Training Datamix" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.827, + 0.351 + ], + "angle": 0, + "content": "Table 9 presents the full data mix used across all training stages apart from our manually collected data in §5. This contains annotations from existing public datasets as well as synthetically generated data (see §4). We filter and include a wide variety of existing datasets spanning across images (captioning, QA, grounding), videos (captioning, QA, temporal localization, region captioning and dense captioning) and text-only datasets to preserve the text-instruction following capabilities of our model. Most importantly, we filter out every dataset that contains annotations generated by proprietary models. Table 7 and Table 8 show the exact number of samples for each dataset in Stage 2 and Stage 3 respectively. Majority of the data in stage 2 is synthetic, with a focus on captioning samples, since they carry the dense information about the image or video. In stage 3, we have one third of the data, mostly focusing on human annotated samples, covering a large variety of tasks." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.376, + 0.809, + 0.793 + ], + "angle": 0, + "content": "
DatasetNum SamplesTypeDatasetNum SamplesType
Image SyntheticImage Synthetic
PDFAcc (QA) [132]12MQAPDFAcc (QA) [132]2MQA
PDFAcc (Cap) [132]12MCap.ArxivCap [134]1.5MCap./QA
UCSF [133]6MQASA1B [105]800KCap.
ArxivCap [134]1.8MCap./QAObject365 [135]300KCap.
SA1B [105]10MCap.OpenImages [136]300KCap.
Object365 [135]3.5MCap.DocVQA [53]100KQA
OpenImages [136]1.8MCap.InfographicVQA [56]50KQA
DocVQA [53]50KQAPixmoCap [11]500KCap
InfographicVQA [56]20KQAVideo Synthetic
PixmoCap [11]600KCapYT-1B (QA) [137]300KMCQA
Video SyntheticEgo4D (Cap.) [115]180KCap.
YT-1B (Cap.) [137]14MCap.Ego4D (QA) [115]700KQA
YT-1B (QA) [137]3MMCQASpoken Moments [138]449KCap.
Ego4D (Cap.) [115]180KCap.Charades [139]8KCap.
Ego4D (QA) [115]700KQAKinetics710 [121]40KCap.
Spoken Moments [138]449KCap.DiDeMo [140]7.5KCap.
Charades [139]8KCap.Text Synthetic
Kinetics710 [121]40KCap.NaturalReasoning [141]1MQA
DiDeMo [140]7.5KCap.Human Annotated
Text SyntheticImage QA [9]2.8MQA
NaturalReasoning [141]1MQAImage Cap [9]36KQA
Human AnnotatedImage Grnd. [9]1.4MQA
Image QA [9]2.8MQAImage Misc. [9]1.4MQA
Video QA [9]570KQAVideo QA [9]570KQA
Video TL [9]16KTemp. Loc.Video Cap. [9]315KQA
Video Dense Cap. [9]10KDense Cap.Video TL [9]16KTL
Text QA [9]2MMixVideo Dense Cap. [9]10KDCap.
Total72.5MVideo Region Captioning [9]15KCap.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.795, + 0.436, + 0.81 + ], + "angle": 0, + "content": "Table 7: PLM Stage 2 training data mix." + }, + { + "type": "table_caption", + "bbox": [ + 0.52, + 0.863, + 0.787, + 0.878 + ], + "angle": 0, + "content": "Table 8: PLM Stage 3 training data mix." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.089, + 0.318, + 0.427 + ], + "angle": 0, + "content": "
DatasetSize
DVQA [142]222222
PlotQA [143]157070
MapQA [144]42761
OCRVQA [145]167646
Localized Narratives [146]199998
FigureQA [147]119999
Hateful Memes [148]9713
CLEVR [149]73181
CLEVR v.0 [149]70000
IconQA [150]116514
TextVQA [112]21953
GeomVerse [151]11162
RobuT (wikiqsl) [152]80757
WebSight [153]10000
Visual7W [154]15961
TallyQA [155]100050
Robust (WTO) [152]42495
DaTik [156]47974
CocoQA [157]46287
ChartQA [109]27395
VQAv2 [111]82772
Chart2Text [158]35946
VisText [159]35995
FinQA [160]5276
DocVQA [53]12089
STVQA [161]18684
TAT-QA [162]2199
RenderedText [163]10435
RAVEN [164]31418
IAM [165]7549
A-OKVQA [39]17720
TabMWP [166]45439
CocoQA [157]9009
TextCaps [167]21953
Screen2Words [168]16713
VSR [169]2157
TQA [170]9742
Robust (SQA) [152]12769
VisualMRC [171]3027
ScienceQA [61]9947
VQA-RAD [172]313
InfographicVQA [56]2118
Hitab [173]4995
AI2D [55]4863
Inter-GPS [174]2555
diagram_image_to_text [175]595
MIMIC-IT (CGD) [176]70539
MultiHiert [177]15233
NLVR2 [178]136799
RAVEN (Multi-image) [164]56081
SpotTheDiff [179]19340
" + }, + { + "type": "table", + "bbox": [ + 0.325, + 0.089, + 0.495, + 0.429 + ], + "angle": 0, + "content": "
DatasetSize
STAR [72]3032
NeXT-QA [69]3870
VISION [180]9900
FlinstonesSV [181]22341
ImageCoDe [182]16594
VizWiz [40]4900
MIT-States (State Coherence) [183]1900
MIT-States (Prop. Coherence) [183]1900
WebQA [184]9338
Birds-to-Words [185]14281
AESOP [186]6915
RecipeQA (Img. Coherence) [187]8699
CLEVR-Change [188]3885
IEEdit [189]3456
ChartQA [109]45820
DocVQA [53]69562
InfographicVQA [56]32661
TextVQA [112]69170
TextCaps [167]21324
VisualMRC [171]24456
WTQ [190]16885
HME100k [191]74492
chrome_writing [163]8825
OK-VQA [110]27536
GeometrySk [174]4802
VQA-RAD [172]1793
Total2796145
Image Cap.
DatasetSize
DOCCI [192]13362
DCI [193]7599
Altogether [194]15166
Total36127
Image Misc.
DatasetSize
AI2d [55]12413
COCO cap. [49]414113
GQA-Balanced [195]943000
Total1369526
" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.089, + 0.655, + 0.437 + ], + "angle": 0, + "content": "
Grounding
DatasetSize
VisualGenome [66]154792
Flickr Entities [196]296332
DCI (Region Caption) [193]304912
RefCOCO/g+/ [197]212923
VCR [60]855577
Total1398690
Image Synth.
DatasetSize
DocVQA [53]50170
InfographicVQA [56]21660
PDFAcc (Cap.) [132]12024670
PDFAcc (QA) [132]12024670
UCSF [133]5953490
ArxivCap [134]1859680
SA1B [105]9834573
Object365 [135]3484584
OpenImages [136]1740864
PixmoCap [11]584650
Total47579011
Video QA
DatasetSize
EgoQA [119]7813
NExT-QA (instruct) [69]34114
NExT-QA (MCQ) [69]34114
PerceptionTest [71]2403
ActivityNetQA [76]23530
VideoInstruct (human) [20]25803
CLEVERR (MC) [120]42620
CLEVERR (QA) [120]40000
Kinetics710 [121]39949
SVv2 (classification) [122]40000
VidLN [123]43126
VidLN (QA) [123]75090
How2QA [8]45731
STAR [72]35297
Memento [198]40060
Memento-MultImage [198]40060
Total569710
Video Cap.
DatasetSize
VATEX (en caption) [84]259910
Charades (caption) [139]11593
ActivityNet (captions) [125]33375
YouCook2 [83]10337
Total315215
" + }, + { + "type": "table", + "bbox": [ + 0.667, + 0.089, + 0.813, + 0.402 + ], + "angle": 0, + "content": "
Video Temporal Loc.
DatasetSize
HiREST [199]7919
Charades [139]7566
DiDeMo [140]435
Total15920
Video Region Captioning
DatasetSize
HC-STVG [200]10131
VidLN (UVO subset) [123]5296
Total15427
Video Dense Cap.
DatasetSize
ActivityNet [125]8859
YouCook [83]1039
Total9898
Video Synth.
DatasetSize
Spoken Moments [138]449044
Charades [139]7919
Kinetics710 [121]39949
DiDeMo [140]7566
Ego4D (Cap.) [115]183029
Ego4D (QA) [115]703935
YT-1B (Cap.) [137]14792983
YT-1B (QA) [137]3383670
Total19568095
Text-QA
DatasetSize
no robots [201]9485
MathQA [202]29837
LIMA [203]1030
GSM8k (socratic) [204]7473
GSM8k [204]7473
FLAN [205]156050
Dolly15k [206]15011
Maggie Pro (MT) [207]300000
Maggie Pro [207]300000
Total2056359
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.444, + 0.825, + 0.501 + ], + "angle": 0, + "content": "Table 9: PLM training datamix. Our mix includes synthetic and manually annotated data across a combination of image data (QA, captioning, OCR, Visual grounding), video data (captioning, grounded captioning, dense captioning, temporal localization) and text-only data. Importantly, all data is publicly accessible, and not generated by proprietary models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.528, + 0.416, + 0.542 + ], + "angle": 0, + "content": "A.3 Ablation Experiment Details" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.553, + 0.827, + 0.692 + ], + "angle": 0, + "content": "We provide additional details about the ablation experiment in §6.5. We report benchmark average scores across 5 categories, along with the average across all of them. We select a representative set of benchmarks from the full set of image and video benchmarks in §6.2 and §6.3 that report comparable scores so the average results are meaningful. For Video captioning we select Dream 1K and report the LLM-judge score with Llama3.3 70B as judge. for Short Video QA, and Finegrained QA, we select benchmarks that report MCQ accuracy (and exclude open-ended QA). For Hallucination, we include both benchmarks. For Spatial and Temporal tasks, we select BLINK, CVBench, VSR, and Charades-STA. For Image Perception, we choose SEED, MMMU, VQAv2, OK-VQA, and VizWiz. We train the ablation setup of SFT with the exactly matching hyperparameters as our final run; only difference is the size of the SFT datamix." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.712, + 0.466, + 0.73 + ], + "angle": 0, + "content": "B Synthetic Scaling Experiments" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.744, + 0.828, + 0.829 + ], + "angle": 0, + "content": "In this section we provide additional results to the synthetic scaling experiments in §4.2. 
We report aggregate benchmark accuracies across three categories — Video QA, OCR QA and Image QA — by selecting representative benchmarks from each category. For VideoQA, these are STAR [72], EgoSchema [90], MVBench [70], VideoMME [75] and PerceptionTest [71]; For OCR QA, these are ChartQA [109], DocVQA [53], InfographicsQA [56], TextVQA [112] and OCRBench [57]; and for Natural Image QA, these are RealworldQA [45], OKVQA [110], VQAv2 [111], and VizWiz [40]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.843, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Scaling with encoder size. After investigating the impact of the LLM decoder in Fig. 2, we examine the impact of increasing the vision encoder size from 300M (PE Large) to 2B (PE Giant) for each language model scale next. In Fig. 9, we overlay the new power-law with the 2B vision encoder (black dashed) line onto the 300M (red dashed) line. Notably, we find that the larger vision encoder \\((300\\mathrm{M}\\rightarrow 2\\mathrm{B})\\) leads to greater scaling trend on video QA benchmarks. Quantitatively, the power law" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.096, + 0.415, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.417, + 0.097, + 0.616, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.097, + 0.818, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.249, + 0.825, + 0.275 + ], + "angle": 0, + "content": "Figure 9: Scaling with encoder size. Scaling trends of PE-G vs. PE-L vision encoders. Larger encoders scale better in Video QA tasks while similar scaling in OCR and Natural QA is seen." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.313, + 0.825, + 0.356 + ], + "angle": 0, + "content": "fit has improved from \\(-0.15\\) to \\(-0.19\\). The two lines intersect around 8B scale with PE-G, proving that 8B and larger PLM will benefit more with larger vision encoder. We use PE-L for 1B and 3B LLM scale and PE-G for 8B scale by default." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.386, + 0.413, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.415, + 0.388, + 0.616, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.388, + 0.818, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.54, + 0.825, + 0.567 + ], + "angle": 0, + "content": "Figure 10: Scaling with input size. Scaling trends of training with 16 tiles/frames vs. 8 tiles/frames. Higher input size scales better in Video QA and OCR QA tasks while similar trend is seen for Natural QA." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.615, + 0.827, + 0.782 + ], + "angle": 0, + "content": "Scaling with input size. In Fig. 10, we show the impact of increasing the input size to VLM through higher image resolution and more video frames. In this setting, each scale of PLM trains with dynamic tiling for image input and uniform sampling for video input with maximum 8 or 16 tiles/frames per sample. In each plot, the average error of PLM trained with 16 tiles/frames are plotted. All models use \\(2 \\times 2\\) spatial average pooling before input to LLM, and each tile/frame has \\(448 \\times 448\\) resolution. Similar to Fig. 2, we show power law fit with a black dashed line, and compare to 8 tiles/frames training denoted with red dashed line. Notably, we find out that on Video QA and OCR QA benchmarks, PLM shows better scalability with training with higher input size. 
This means with the same FLOP counts at \\(10^{13}\\), training with 16 frames yields 2.0 points lower metric error than the 8-frame counterpart (32.2 vs 30.2). Similar trends are observed with OCR QA going from 8 tiles max. to 16 tiles max. Notably, higher resolution did not make a difference for Natural QA tasks. We chose the 16 max-tiles and frames to be our final training setting for stage 2 PLM." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.787, + 0.827, + 0.913 + ], + "angle": 0, + "content": "In Fig. 11, we show the breakdown of the scaling trend shown in §4.2. “H” stands for human only (i.e., no synthetic) baseline. From the breakdown, the most notable point is the scalability in OCR, Chart, Document QA tasks. In each benchmark, synthetic data makes more than 10 points of improvement on every model scale, compared to “no synthetic” baselines. Moreover, there is no sign of saturation; the performance will most likely improve with more synthetic data. We hypothesize that OCR, Chart, Document QA tasks reduce to “translation” task — a set of pixels has one-to-one mapping to text space. Remaining tasks exhibit clean power-law relationship between metric error and FLOPs. The last plot shows scaling trend on average over all benchmarks, which shows a close power-law relationship."
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.181, + 0.096, + 0.346, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.097, + 0.501, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.097, + 0.66, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.097, + 0.816, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.217, + 0.346, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.218, + 0.5, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.218, + 0.657, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.218, + 0.815, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.338, + 0.346, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.338, + 0.5, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.338, + 0.66, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.338, + 0.815, + 0.45 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.459, + 0.346, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.459, + 0.501, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.459, + 0.658, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.703, + 0.511, + 0.799, + 0.52 + ], + "angle": 0, + "content": "Power Law Fit" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.599, + 0.825, + 
0.663 + ], + "angle": 0, + "content": "Figure 11: Synthetic Scaling Plots. Relationship between Average Error and training compute (in floating-point operations) for various 1B, 3B, 8B PLM with L14 vision encoder. Each plot reports the individual error in VideoMME [75], STAR [72], EgoSchema [90], How2QA [8], MVBench [70], PerceptionTest [71], ChartQA [109], DocVQA [53], InfoVQA [56], OCRBench [57], RealworldQA [45], OKVQA [110], VQAv2 [111], VizWiz [40], and TextVQA [112]. Finally, we report Avg. All, which average over all the metrics." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.665, + 0.422, + 0.68 + ], + "angle": 0, + "content": "C VLM Benchmark Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.825, + 0.766 + ], + "angle": 0, + "content": "In this section, we provide details about all the image and video benchmarks considered in §6 including composition and evaluation metrics for image benchmarks (§C.1), video benchmarks (§C.2) and our PLM-VideoBench (§C.3. We also describe evaluation protocol for all these benchmarks including inference parameters and prompts (§C.4). Pointers to evaluation code are linked where available." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.785, + 0.353, + 0.799 + ], + "angle": 0, + "content": "C.1 Image Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.811, + 0.826, + 0.84 + ], + "angle": 0, + "content": "Image captioning We evaluate on single image captioning and grounded image captioning benchmarks like COCO [49], nocaps [50] and Flickr [51]. We report CIDEr as the evaluation metric." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Perception and reasoning We evaluate on broad, general purpose VQA benchmarks like MMMU [37], VQAv2 [111], MMBench [38], OK-VQA [39], VizWiz [40] as well as hard perception benchmarks like BLINK [44], CV-Bench [19], RealWorldQA [45], and VSR [127]. 
For all MCQ benchmarks, we report accuracy of selecting the correct option." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Charts, diagrams and documents We evaluate on benchmarks for reasoning over various types of charts, graphs, diagrams, infographics etc. Specifically, DocVQA [53], ChartQA [54], TextVQA [52], InfographicsVQA [56], AI2D [55], OCRBench [57], and SEED [58]. We report accuracy of selecting the correct option." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.163, + 0.828, + 0.207 + ], + "angle": 0, + "content": "Image Hallucination Finally, we evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as HallusionBench [67] and POPE [68]. For HallusionBench we report the \\(aAcc\\) metric (code) which accounts for correctness and consistency using an LLM judge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.221, + 0.35, + 0.236 + ], + "angle": 0, + "content": "C.2 Video Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.246, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Video captioning We evaluate on short-video captioning benchmarks, namely YouCook2 [83] and VATEX [84] as well as recent detailed video captioning benchmarks — DREAM-1k [86] and AuroraCap-VDC [87]. For YouCook2 and VATEX, we report CIDEr score [208]. For DREAM-1k we report AutoDQ F1-score (code) and for AuroraCap-VDC we report the VDC accuracy (code) following the author's proposed metric." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.827, + 0.416 + ], + "angle": 0, + "content": "Short video QA We evaluate on multiple-choice (MCQ) benchmarks such as How2QA [8], NNextQA [69], PerceptionTest [71], STAR [72], TGIF-QA [73], TVQA [74], Video-MME [75] and TVBench [80]. We report accuracy of selecting the correct option. 
We also evaluate on open-ended question answering benchmarks (w/o options) such as ActivityNet-QA [76] (code), MMBenchVideo [79] (code) and VCGBench-Diverse [22]. We report LLM-judge scores/accuracies for these benchmarks. For VCGBench-Diverse, we report the average of 5 LLM-judge scores (code)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.826, + 0.473 + ], + "angle": 0, + "content": "Long video QA We evaluate on popular long-video benchmarks such as EgoSchema [90], LVBench [92], LongVideoBench [94] and MLVU [96]. We report accuracy of selecting the correct option." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.826, + 0.556 + ], + "angle": 0, + "content": "Fine-grained video QA We evaluate on benchmarks for fine-grained spatial, temporal and detail reasoning in videos such as TemporalBench [99], TOMATO [100], MotionBench [101], TempCompass [102] and CG-Bench [97]. We report accuracy of selecting the correct option. For Temporal-Bench, we report the multi-binary accuracy (MBAcc) (code) proposed by the authors to reduce bias in evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Hallucination We evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as VideoHallucer [88] and EventHallusion [89]. We report accuracy of selecting the correct option." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.63, + 0.345, + 0.644 + ], + "angle": 0, + "content": "C.3 PLM-VideoBench" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.683 + ], + "angle": 0, + "content": "We evaluate on our suite of benchmarks for fine-grained and spatio-temporal reasoning in videos. These include:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.699, + 0.826, + 0.769 + ], + "angle": 0, + "content": "Fine-grained QA (FGQA) We report multi-binary accuracy (MBAcc) following prior work [99]. 
In short, this entails presenting the model multiple independent, binary-choice questions about the same video (in our case, three questions) and requiring the model to gets all of them correct, to count towards accuracy. This sets a higher bar for models, and combats bias in multiple-choice question benchmarks that prior work identifies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.784, + 0.825, + 0.827 + ], + "angle": 0, + "content": "SmartGlasses-QA (SGQA) We report LLM-judge accuracy of the predicted answer compared to the ground truth answer. We follow existing LLM judge prompts from ActivityNetQA (code). The prompt is repeated below for completeness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.87 + ], + "angle": 0, + "content": "Video Region Captioning (PLM-RCap) We use an LLM-judge to generate the similarity scores between predicted and ground truth captions. The prompt is below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "**Dense Video Region Captioning (PLM-RDCap)** We adapt the SODA metric [126] from dense video captioning literature for this task. To compute this metric, we use the same LLM-judge from" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "above to generate the pairwise similarity scores between predicted and ground truth captions, which is then fed to the standard metric computation routine." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.169 + ], + "angle": 0, + "content": "Region Temporal Localization (PLM-RTLoc) We report standard temporal localization metrics, namely Mean Recall@1, averaged over a range of IoU thresholds [0.3, 0.5, 0.7, 0.9]." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.188, + 0.363, + 0.203 + ], + "angle": 0, + "content": "C.4 Evaluation Protocols" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.827, + 0.287 + ], + "angle": 0, + "content": "Common evaluation protocol. For video benchmark evaluations, we sample 32 frames uniformly from the full video unless otherwise specified. For uniformity and consistency across benchmarks, we implement all LLM-judge evaluations using LLama3.3-70B-Instruct [13], following LLM judge prompts from popular evaluation frameworks [209, 210] where available. Outputs from all models are generated via greedy sampling (temperature 0)." + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.299, + 0.319, + 0.312 + ], + "angle": 0, + "content": "SG-QA judge prompt" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.316, + 0.816, + 0.367 + ], + "angle": 0, + "content": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.379, + 0.294, + 0.39 + ], + "angle": 0, + "content": "##INSTRUCTIONS:" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.392, + 0.796, + 0.405 + ], + "angle": 0, + "content": "- Focus on the meaningful match between the predicted answer and the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.405, + 0.561, + 0.417 + ], + "angle": 0, + "content": "- Consider synonyms or paraphrases as valid matches." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.417, + 0.678, + 0.43 + ], + "angle": 0, + "content": "- Evaluate the correctness of the prediction compared to the answer." 
+ }, + { + "type": "list", + "bbox": [ + 0.179, + 0.392, + 0.796, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.442, + 0.646, + 0.455 + ], + "angle": 0, + "content": "Please evaluate the following video-based question-answer pair:" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.455, + 0.334, + 0.467 + ], + "angle": 0, + "content": "Question: [question]" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.468, + 0.362, + 0.48 + ], + "angle": 0, + "content": "Correct Answer: [target]" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.48, + 0.399, + 0.492 + ], + "angle": 0, + "content": "Predicted Answer: [candidate]" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.492, + 0.816, + 0.581 + ], + "angle": 0, + "content": "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. For example, your response should look like this: {\"pred\": \"yes\", \"score\": 4.8}." + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.597, + 0.34, + 0.61 + ], + "angle": 0, + "content": "PLM-RCap judge prompt" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.613, + 0.816, + 0.689 + ], + "angle": 0, + "content": "Your task is to compare a given pair of captions and provide a single score indicating how correct the pred is compared to GT, on a scale from 0 to 10. Focus on meaning and context, not exact word matches. Penalize missing and incorrect information, with lower scores for more significant errors. High scores require accurate conveyance of all key GT information. 
Respond with only the score, starting your response with the number and including no additional text. Output format: [score]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.711, + 0.825, + 0.782 + ], + "angle": 0, + "content": "PLM-VideoBench inference prompts. Table 10 contains example inference prompt examples for each PLM-VideoBench task. Note that some variation exists between instances in the benchmark. For example, for RCap a prompt may be \"What is happening to the subject in the region highlighted by the red rectangle ...\" instead of \"Give a detailed description of the events occurring in the region marked by the red rectangle ...\" however they convey the same underlying instruction and information." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.787, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Proprietary models like GPT-4o and Gemini require more careful prompting to ensure that the output formatting is respected. For example, we append instructions to prevent model hallucinations (e.g., \"You must use these frames to answer the question; do not rely on any external knowledge or commonsense\"), to prevent refusals to answer (e.g., \"Even if the information in these separate frames is not enough to answer the question, please try your best to guess an answer which you think would be the most possible one based on the question. Do not generate answers such as not possible to determine\") and in-context examples to help guide the model towards the correct output format. Model- and benchmark-specific inference prompts will be released along with our code for full reproducibility." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.089, + 0.825, + 0.328 + ], + "angle": 0, + "content": "
TaskPrompt
FGQAQuestion: [question] \\n Options: \\n (A) [option1] \\n (B) [option2] \\n Only give the best option.
SGQAThe following question is asked by the camera wearer at the end of the video. Provide a detailed answer even if unsure. Try to answer in around 20-30 words. Now answer the following question based on the video content: [question]
RDCapCreate a dense caption of the subject's actions within the red rectangles, including action frames ids and brief descriptions. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video.
RCapGive a detailed description of the events occurring in the region marked by the red rectangle within frames ([start frame], [end frame]) in this 32 frame video
RTLocGiven the region marked by the red rectangle in the video, please provide the start and end frame of when '[event]' happens. Use the format (start, end), where start and end are frame numbers between 0 and 31 in this 32 frame video.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.334, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Table 10: PLM-VideoBench task prompts. Items in square brackets are placeholders filled in for each benchmark instance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.39, + 0.521, + 0.406 + ], + "angle": 0, + "content": "D Additional PLM-VideoBench Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.825, + 0.465 + ], + "angle": 0, + "content": "We present benchmarking results across all model scales (1B, 3B, 8B) in Table 11, to supplement the 8B model results in the main paper (Table 5). Our approach consistently outperforms baselines across all scales, including proprietary models whose model scale is unknown." + }, + { + "type": "table", + "bbox": [ + 0.345, + 0.479, + 0.652, + 0.749 + ], + "angle": 0, + "content": "
ModelFGOAMBaccSGQAAcc†RDCAPSOA‡RCapscore†RTLocmeanRAvg
Human perf.90.967.966.653.967.870.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
1B scale
Qwen2VL-2B [30]39.038.50.918.110.829.1
InternVL2-1B [10]35.828.90.317.22.723.8
InternVL2.5-1B [10]42.339.66.723.61.630.8
PLM-1B57.640.950.340.957.749.4
3B scale
Qwen2.5 VL-3B [106]43.745.10.317.213.933.1
InternVL2-4B [10]43.241.70.519.99.630.3
InternVL2.5-4B [10]50.049.24.925.915.435.3
PLM-3B67.138.853.145.058.253.0
8B scale
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.19, + 0.751, + 0.804, + 0.765 + ], + "angle": 0, + "content": "Table 11: PLM-VideoBench results across all model scales to supplement results in Table 5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.48, + 0.81 + ], + "angle": 0, + "content": "E Baseline Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.825, + 0.781, + 0.84 + ], + "angle": 0, + "content": "We provide baseline-specific implementation details for all models in §6.1 of the main paper." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Proprietary baselines We evaluate the GPT and Gemini family of models. For GPT-4o, we use the GPT-4o-2024-11-20 checkpoint. We feed 32 uniformly sampled frames regardless of video length, loaded at high image quality setting. For Gemini, we evaluate Gemini-1.5-Pro and Gemini-2.0-Flash. For VQA tasks, we input the video (without audio) which is processed internally at 1 fps. For" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "spatio-temporal tasks (RCap, RDCap, and RTLoc) we use the same inputs as for open-source models and GPT-4o. We evaluate these models using API call." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.828, + 0.207 + ], + "angle": 0, + "content": "Open-source models We evaluate InternVL, Qwen, Molmo and Llava-OV models. We follow official implementation and preprocessing pipelines for each. Specifically, we evaluate InternVL2 and InternVL2.5 (code); QwenVL2 and QwenVL2.5 (code); Molmo-O-0924 (code) and Llava-OV (code). For QwenVL, we sample frames at 1 fps from videos. For InternVL2, we use 12 tiles per image as this more closely matches the reported results." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.219, + 0.828, + 0.278 + ], + "angle": 0, + "content": "Human performance baseline. In Table 5, we report human performance on PLM-VideoBench. For each task, we present annotators with the test sets and collect answers for each instance given the standard task prompt. Given the difficulty of RDCap, we reuse our data annotation pipeline in \\(\\S H\\) to collect new dense captions independently, rather than providing the standard task instruction." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.294, + 0.365, + 0.31 + ], + "angle": 0, + "content": "F Additional Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.325, + 0.422, + 0.341 + ], + "angle": 0, + "content": "F.1 Comparison with LLaMA-3V" + }, + { + "type": "table", + "bbox": [ + 0.294, + 0.357, + 0.699, + 0.457 + ], + "angle": 0, + "content": "
ModelAvg.DocVQA (test) acc [53]CharQA (test) acc [54]TextVQA (test) acc [52]InfoQA (test) acc [56]AL2D (two mask) acc [55]MMMU (val) acc [37]VQAV2 (val) acc [111]
LLaMA 3.2V (11B) [13]73.088.483.479.763.691.150.775.2
LLaMA 3.2V (90B) [13]76.690.185.582.367.292.360.378.1
PLM (1B)67.190.778.682.163.084.934.881.7
PLM (3B)74.493.884.384.374.690.941.284.3
PLM (8B)76.294.686.586.580.992.746.185.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.458, + 0.825, + 0.487 + ], + "angle": 0, + "content": "Table 12: PLM versus LLaMA-3V on Image Benchmarks: Note that we use LLaMA-3V-90B [13] for generating image captions in our synthetic data engine." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.52, + 0.341, + 0.536 + ], + "angle": 0, + "content": "F.2 Image Captioning" + }, + { + "type": "table", + "bbox": [ + 0.402, + 0.551, + 0.603, + 0.782 + ], + "angle": 0, + "content": "
ModelCOCO (karnathy) CIDEr [49]Nocap CIDEr [50]Flickr CIDEr [51]
Proprietary
GPT-4o [33]74.476.671.7
Gemini 1.5 Pro [35]70.671.168.2
Gemini 2.0 Flash [35]84.885.066.6
1B scale
Qwen2VL-2B [30]107.1101.286.0
InternVL2.5-1B [10]122.6110.586.1
PLM-1B138.6124.2100.5
3B scale
Qwen2.5 VL-3B [106]101.7105.577.5
InternVL2.5-4B [10]125.4117.187.4
PLM-3B144.9126.598.0
8B scale
LLaVA-OV-7B [28]112.170.755.7
Qwen2.5VL-7B [106]36.832.734.9
InternVL2.5-8B [10]125.8116.796.5
PLM-8B146.7129.9105.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.789, + 0.828, + 0.817 + ], + "angle": 0, + "content": "Table 13: Image Captioning benchmarks. PLM versus proprietary models and open-access baselines of comparable scale on Image Captioning benchmarks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.34, + 0.108 + ], + "angle": 0, + "content": "F.3 Image Grounding" + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.125, + 0.7, + 0.333 + ], + "angle": 0, + "content": "
ModelRefCOCOvalRefCOCO testARefCOCO testBRefCOCO+ valRefCOCO+ testARefCOCO+ testBRefCOCOg valRefCOCOg testAvg.
Specialists
GroundingDINO [211]90.693.288.288.289.075.986.187.086.6
UNINEXT-H [212]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [213]90.693.288.288.289.075.986.187.086.6
1B scale
PLM-1B88.591.584.883.288.676.586.086.485.7
3B scale
Qwen2.5 VL-3B [106]89.191.784.082.488.074.185.285.785.0
PLM-3B93.394.989.589.893.684.290.890.990.9
8B scale
Cube-LLM [214]90.992.687.983.989.277.486.687.287.0
Qwen2VL-7B [30]91.793.687.385.890.579.587.387.887.9
Qwen2.5VL-7B [106]89.191.784.082.488.074.185.285.785.0
InternVL2-8B [10]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [10]90.394.585.985.291.578.886.787.687.6
PLM-8B90.691.885.987.391.381.188.889.288.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.336, + 0.825, + 0.365 + ], + "angle": 0, + "content": "Table 14: Image Grounding results on RefCOCO+/g. PLM performs competitively compared to the baselines across all model scales, and outperforms specialist models for the image grounding task." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.398, + 0.405, + 0.415 + ], + "angle": 0, + "content": "F.4 Long Video Understanding" + }, + { + "type": "table", + "bbox": [ + 0.398, + 0.431, + 0.606, + 0.721 + ], + "angle": 0, + "content": "
ModelLong Video QA
LVBench acc [92]Long VideoBench (val) acc [94]MLVU (dev) Marq [96]
Proprietary
GPT-4o [33]37.266.7*67.4
Gemini 1.5 Pro [35]33.1*64.0*69.9
Gemini 2.0 Flash [35]-61.6*69.5
1B scale
Qwen2VL-2B [30]42.047.962.7
InternVL2-1B [10]31.443.3*52.0
InternVL2.5-1B [10]35.347.957.3*
PLM-1B40.052.358.9
3B scale
Qwen2.5 VL-3B [106]43.3*54.2*68.2
InternVL2-4B [10]34.053.0*59.9*
InternVL2.5-4B [10]40.156.368.3*
PLM-3B40.457.965.0
8B scale
LLaVA-OV-7B [28]38.855.764.6
Qwen2VL-7B [30]46.055.869.8*
Qwen2.5VL-7B [106]45.3*56.0*70.2*
InternVL2-8B [10]37.055.464.0*
InternVL2.5-8B [10]43.2*60.0*68.9
PLM-8B44.556.966.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.727, + 0.828, + 0.77 + ], + "angle": 0, + "content": "Table 15: Results on long video understanding tasks. We compare PLM with open-access baselines and proprietary models of comparable scale, and report results over 3 long video QA benchmarks. Cells with * are reported numbers from literature. The remaining are reproduced using official code." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.475, + 0.108 + ], + "angle": 0, + "content": "G PLM-FGQA: Fine-grained QA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.825, + 0.179 + ], + "angle": 0, + "content": "We present PLM-FGQA Fine-grained QA (FGQA), a video dataset focused on \"how\" actions are performed, capturing nuanced fine-grained details through specially designed questions and carefully annotated answers. Due to the scarcity of fine-grained video Q&A data, see Table 16, we built a data engine to enable the collection of our 2.4M Q&A dataset, PLM-FGQA." + }, + { + "type": "table", + "bbox": [ + 0.272, + 0.193, + 0.723, + 0.327 + ], + "angle": 0, + "content": "
DatasetYear#Q&AsDatasetYear#Q&As
MovieQA20166462STAR202160000
MSRVTT-QA2017243690CLEVRER202382620
TGIF-QA2017165165EgoQA202419000
MSVD-QA201751000PerceptionTest202444146
TVQA2018152545VideoInstruct202425803
ActivityNetQA201958000MoVQA202421953
How2QA202044007CinePile2024303828
Next-QA202152044Sports-QA202594000
PLM-FGQA20252379067
" + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.331, + 0.772, + 0.346 + ], + "angle": 0, + "content": "Table 16: Comparison of our PLM-FGQA dataset with existing video-QA datasets." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.385, + 0.45, + 0.4 + ], + "angle": 0, + "content": "G.1 Annotation process: Data Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.825, + 0.48 + ], + "angle": 0, + "content": "Our data engine is built upon the following modules: (1) Temporal Segment Generation, (2) Question Generation, (3) Answer Generation, (4) Human Annotation (answer verification/manual answer annotation), (5) Quality Control, as illustrated in Figure 12. Next, we describe each module in detail, and finally also provide additional details about the extra steps we took for forming the FG-QA component of PLM-VideoBench out of these annotations." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.496, + 0.825, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.553, + 0.71, + 0.569 + ], + "angle": 0, + "content": "Figure 12: Data engine used to collect the PLM-FGQA dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.598, + 0.445, + 0.613 + ], + "angle": 0, + "content": "G.1.1 Temporal Segment Generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.664 + ], + "angle": 0, + "content": "We source the video data that serves as a basis for our annotations from publicly available datasets. Based on the video sources and the type of existing annotations, we split the videos into three distinct categories." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.825, + 0.741 + ], + "angle": 0, + "content": "Videos with existing ground-truth segment annotations: We directly adopt segments with their human-annotated action annotations from the following datasets: Ego4d Goal-Step[215], Ego4D Moments[115], EgoExo4D [116], HT-Step[216, 217], COIN [117], CrossTask [118], and YouCook2 [83]. All those sources provide video segment boundaries accompanied by some form of textual action descriptions, and are therefore readily usable with the rest of the pipeline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Unedited videos of physical activities: For physical activities videos (e.g. basketball, dancing, soccer), actions are usually atomic and short (e.g. dribble, dance move, kick) and therefore rerequire precise temporal localization. To source videos for these scenarios we used data from EgoExo4D [116] that contains temporally well-aligned and precise narrations; we obtained segments of 2-3 seconds centered around narration timings, and used the anchor narrations directly as the action description." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.822, + 0.825, + 0.892 + ], + "angle": 0, + "content": "Raw, untrimmed videos in-the-wild without temporal segment annotations. We source a very large part of our data from untrimmed instructional videos in the large-scale HT100M dataset [114] which we first need to segment before use. The goal is to obtain video clips that contain meaningful, salient actions, and also caption the resulting segments with concise but accurate action descriptions. We describe the automatic segmentation and captioning module in the following." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.898, + 0.75, + 0.913 + ], + "angle": 0, + "content": "The automatic segmentation and captioning pipeline involves the following three stages:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.175, + 0.09, + 0.516, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.091, + 0.818, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.246, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Figure 13: Distribution of question types (left) and video sources (right) in the FGQA component of PLM-VideoBench." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.312, + 0.827, + 0.478 + ], + "angle": 0, + "content": "Temporal segment proposal. Given untrimmed long videos, the first step is to identify semantically coherent segments within them. Inspired by prior work on unsupervised action proposal and segmentation, we leverage visual feature clustering to generate temporal segment proposals, and use shot-boundary detection results to further refine the segment boundaries. We extract clip-level visual features[218] using a sliding window with temporal stride of 1 second. We then compute the pairwise similarity between neighborhood features and detect the class-agnostic action boundaries using a boundary detection kernel (similar to those used in literature[219, 220]). Finally, since the detected segments are usually over-segmented, we perform a bottom-up agglomerate clustering approach to group adjacent segments into clusters, using a segment duration prior of 10 seconds. We also leverage shot boundary detection[221] to obtain precise moments of scene changes: we refine the boundaries of the segment proposals by aligning them to the detected shot boundaries when they're sufficiently close (\\(\\leq 1\\) second)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.825, + 0.554 + ], + "angle": 0, + "content": "Segment filtering and ranking. How-to videos often include a lot of content that is irrelevant to the demonstration of the activity at hand, such as the instructor explaining what they are about to do or showcasing tools and ingredients. It is therefore important to detect and filter segments with such uninformative content. To that end we rank candidate segments according to relevance using a series of heuristics and learned models, described below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.825, + 0.616 + ], + "angle": 0, + "content": "a. Talking head detection. A common mode in instructional videos is instructors talking into the camera, describing objects or explaining actions they're about to take. To detect and remove such segments, we employ an Active Speaker Detection (ASD) pipeline[222], which we run densely on every video and combine resulting talking head tracks, to produce an ASD score for every segment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.706 + ], + "angle": 0, + "content": "b. Hand-object interaction (HOI) detection. The presence of hand-object interaction (HOI) can be a good indicator of visually groundable actions. We leverage the temporal selection strategy[223] to filter out the segment proposals that contain hand-object interaction. We first employ an off-the-shelf robust HOI detector[224] to densely extract HOI regions within a proposed segment. The HOI score is then calculated by measuring the likelihood of hand-object interaction in the segment and the averaged probability of all the detected hands." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.825, + 0.837 + ], + "angle": 0, + "content": "c. ASR groundability. HT100M videos contain timestamped ASR captions, which are speech transcriptions of the audio instructions. 
It is desirable to rank candidate segments based on how likely their ASR content is to their video content. The hypothesis here is that segments containing ASR transcriptions that align well to the video content, are more likely to be visual-information rich. Moreover since the action labeling pipeline (described next) relies on ASR metadata for producing descriptions, higher ASR groundability scores make it likelier to produce good quality segment descriptions. For every candidate segment, we compute an ASR-groundability score by computing video-text alignment scores[218] for each ASR caption within the segment and then averaging the ones that are above a threshold (we use 0.5)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.913 + ], + "angle": 0, + "content": "d. Relevance classification. The above heuristics work well for the clear-cut cases they are tailored for, but in practice we found that they struggle with more nuanced segments (e.g. instructor fiddling with an object and describing it rather than using it). To improve the detection of those cases, we manually labelled a small amount of segments that passed through the other filters and trained a binary classifier to classify them as \"relevant\" or \"irrelevant\"; to that end we trained a simple 2-layer MLP classifier" + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.56, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "on top of temporally pooled video representations with a logistic loss for binary classification. We deployed the trained model to provide a relevance score for all the candidate segments." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.826, + 0.169 + ], + "angle": 0, + "content": "We combined the scores resulting from all the modules described above and determined cutoff thresholds, based on a small manually annotated validation set. In production, we keep all the segments that have relevance scores above those thresholds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.175, + 0.828, + 0.259 + ], + "angle": 0, + "content": "Segment captioning We follow a two-step process to obtain action labels for each unlabeled segment: In the first step, a collection of off-the-shelf perception models are used to extract individual image-level captions, video-level captions, and object detections from the segment. The output of all perception models is then fed as text into an LLM to generate long, fine-grained captions. At the second step, the detailed captions are fused with the ASR content of the segment, to obtain a consice action description. Specifically, we query an LLM (Llama 3.3 70B [13]) with the following prompt:" + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.271, + 0.414, + 0.284 + ], + "angle": 0, + "content": "Segment to action labels prompt" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.287, + 0.816, + 0.351 + ], + "angle": 0, + "content": "Detailed description: [fine grained caption] ASR transcription: [asr caption]. Given the detailed description above, identify the specific action performed as part of the activity [task name]. Your response must not be the same as the activity [task name] and needs to be a specific substep within the activity [task name]. Please also supply a rationale for your answer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.363, + 0.825, + 0.391 + ], + "angle": 0, + "content": "The extracted labeled video segments obtained through the above process serve as the foundation for the subsequent Q&A generation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.409, + 0.453, + 0.424 + ], + "angle": 0, + "content": "G.1.2 Automatic Question Generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.434, + 0.825, + 0.504 + ], + "angle": 0, + "content": "We automatically generate questions about the fine-grained details of the way activities are executed in the video. Our questions is generated with a variety of prompts and models which lead to increased question diversity and specificity. In Table 17 we present the question types and sample questions per question type. Here, we summarize how these questions are generated automatically with an ensemble with models and prompts:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.826, + 0.593 + ], + "angle": 0, + "content": "LLM-based action-conditioned question generation Given a segment, its action name (e.g., cut potatoes), a task name (e.g., How to make sweet potato gratin) and optionally other metadata about the segment (for example, recognized objects [?]), we generate questions that can elicit descriptions of fine-grained details by raters with an LLM. We use tailored prompts for generating questions that cover how the activity is executed (tools, object locations, object states, direction of movements, hand pose), and the spatial arrangement of objects." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.606, + 0.464, + 0.619 + ], + "angle": 0, + "content": "Activity FG question generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.622, + 0.816, + 0.711 + ], + "angle": 0, + "content": "I am learning how to [action name] while [task name]. Ask me [N] most relevant questions that reveal the details of the way the step is executed in my environment, e.g., (a) part location, (b) types of tools/ingredients used, (c) direction of movements, (d) how are objects held, (e) object states at the beginning of the step, (f) object state at the end of the step. 
The questions must be answerable by visually observing the activity, without reading instructions or trying out. Please indicate the type of question from (a) to (f) for each question asked at the beginning of the question." + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.726, + 0.457, + 0.739 + ], + "angle": 0, + "content": "Spatial FG question generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.742, + 0.816, + 0.831 + ], + "angle": 0, + "content": "Imagine I have no common sense or understanding of the 3D real world. I am trying to [task name] and am at the step where I am [action name]. There's [object list] when I'm [action name]. Ask me [N] questions about the 3D position of objects, relative location between objects, distance between objects, spatial relationship using prepositions like above, below, next to, etc. that I might want to know. The questions must be answerable by only visually observing me performing activity, without reading instructions or trying out." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.827, + 0.913 + ], + "angle": 0, + "content": "We explicitly encourage the LLM to provide questions that can be answered solely based on the video frames, in contrast to questions that are focused on external knowledge or non-groundable concepts or judging the execution of the step (e.g., avoid questions like is the pan hot enough to add the oil?), what tool is typically used to loosen the axle nut). The rationale for this is to collect as many Q&A pairs that a model cannot answer just based on external knowledge/language prior, but they rather" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "require vision perception to be answered. 
Note that these questions are generated without visual input, hence they are not instance-specific and might not be answerable given the video segment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.827, + 0.306 + ], + "angle": 0, + "content": "VLM-based instance-specific question generation After collecting a first set of Q&As using the LLM-generated questions, we bootstrap a VLM Question Generator model, which takes as input the video segment, question types and optionally the task name, and generates a set of instance-specific visual questions. The VLM Question Generator model is obtained by supervised fine-tuning of PLM with a question generation instruction-tuning dataset which consists of triplets (video, prompt, response), where the prompt includes the instruction to generate questions based on question types and the response includes example questions to be generated for the given video. Due to the lack of such a dataset with fine-grained question, we synthetically generated it by utilizing the Q&A pairs obtained based on the LLM-generated questions. Specifically, for each video segment, we use an LLM to (1) decompose existing Q&A pairs into multiple Q&A pairs, with each new question focusing on one detail of the original answer; (2) tag question types for the generated questions based on an expanded list of question types; and (3) generate a (prompt, response) pair for the segment. This resulted in \\(\\sim 600k\\) training instances." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.317, + 0.465, + 0.33 + ], + "angle": 0, + "content": "VLM Question Generator training sample" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.333, + 0.816, + 0.385 + ], + "angle": 0, + "content": "Generate 3 different questions that reveal the fine-grained details of the way the activity is executed. In particular, focus on these question types: fine-grained object locations, hand pose, object/repetition counts, generating at least one question per type. 
Write each question in a separate line, e.g., Q1. first question." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.386, + 0.333, + 0.397 + ], + "angle": 0, + "content": "Q2. second question." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.406, + 0.315, + 0.421 + ], + "angle": 0, + "content": "ON. N-th question." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.422, + 0.251, + 0.434 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.435, + 0.605, + 0.447 + ], + "angle": 0, + "content": "Q1. Where are the tomatoes positioned prior to being cut?" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.448, + 0.649, + 0.459 + ], + "angle": 0, + "content": "Q2. How is the person grasping the tomato with their left hand?" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.461, + 0.598, + 0.472 + ], + "angle": 0, + "content": "Q3. How many tomatoes did the person use in the segment?" + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.435, + 0.649, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.483, + 0.825, + 0.54 + ], + "angle": 0, + "content": "LLM-based follow-up question generation This final set of questions aims to increase coverage of video details and generate highly fine-grained questions by leveraging the already collected Q&A pairs for each segment and feed them to an LLM that generates \"follow-up\" questions that are more detailed and challenging than the initial questions." 
+ }, + { + "type": "title", + "bbox": [ + 0.18, + 0.55, + 0.45, + 0.563 + ], + "angle": 0, + "content": "Follow-up question generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.566, + 0.819, + 0.743 + ], + "angle": 0, + "content": "I have the following information gathered about the video: [list of previous Q&A samples] Utilizing information and details from all the provided Q&A pairs (make sure to specialize questions based on the already corrected answers, e.g., using referring expressions), ask [N] most relevant and interesting, visual questions that we can ask annotators in order to reveal NEW, rich, additional fine-grained details about the video that we don't know yet, in particular about the following question types: 'tools/ingredients', 'object counts', 'repetition counts', 'direction of movement', 'hand pose', 'fine-grained object locations', 'spatial relations', 'initial state/end state', 'action happened before/after', 'clothes wearing', 'body pose', 'main action in the video', 'temporal extent of action', 'sizes'. The questions should be specific and have a specific answer. Avoid generic questions that can be very tedious to answer, e.g., how many objects are there in the scene. Also, do not generate questions that start with \"Is ...\" and then list options. Prefer open-ended questions, e.g., starting with \"How\". [... More examples & formatting ...]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.763, + 0.444, + 0.777 + ], + "angle": 0, + "content": "G.1.3 Automatic Answer Generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.827, + 0.913 + ], + "angle": 0, + "content": "The next step of the data engine aims to produce correct and comprehensive answers to the generated questions. We obtain automatic answers to the generated questions using a version of PLM that has been fine-tuned with extra privileged information of various forms as input. 
The privileged information includes textual annotations from the metadata available with the candidate training videos and feature embeddings extracted from off-the-shelf models. Useful textual metadata include the video title, ASR captions or written descriptions, video-level task name (inferred by an LLM using the title and captions), and any existing QAs for that video. Off-the-shelf embeddings include frame-level features extracted denseley at 1 fps; we use an open-vocabulary object detection model, OWLv2 [225], for embedding object detection information and CLIP ViT-L14 embeddings [226]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.186, + 0.088, + 0.813, + 0.55 + ], + "angle": 0, + "content": "
Question TypeSample Questions
Action RecognitionWhat is the process being performed on the sandpaper? \nWhat is the action shown?
Action SequenceWhat does the person do after brewing the tea? \nWhat does the person do before marking the vinyl with a pencil?
Counting ProblemsWhat is the quantity of universal down cleaner being poured into the task area? \nHow many branches does the person cut in total? \nHow many times does the person spray Greased Lightning onto the ketchup spill?
Movement DirectionIn what direction is the black welding tool pointing while the person is working on the metal joint? \nHow does the person chop the garlic with the knife?
Object AttributesWhat is the color of the seatpost shown in the video segment? \nWhat is the shape of the tube at the end of the step? \nWhat is the size of the knife being used to chop the spring onions?
Object LocationWhere does the person put the honey bottle away? \nWhere does the person position the clothes before ironing?
Object RecognitionWhat type of roller and paint are being used? \nWhat does the person place on top of the smooth half of the egg carton? \nWhat was the person initially holding in their left hand?
Object StateHow would you describe the sink at the beginning of the cleaning process? \nWhat is the state of the nematode after mixing it with water and sponge?
OtherAt what point in the video is the person seen holding the wires?
PoseHow are the woman's legs positioned while she is sitting? \nHow bent is the left elbow during the activity?
Spatial RelationsHow far is the bias tape maker from the right edge of the ironing board? \nWhat is the spatial relationship between the bowls and the Brussels sprouts on the kitchen countertop?
Speed/ForceHow would you describe the consistency of pressure applied during sanding? \nHow fast does the person initially push the stone?
" + }, + { + "type": "table_caption", + "bbox": [ + 0.299, + 0.556, + 0.698, + 0.571 + ], + "angle": 0, + "content": "Table 17: PLM-FGQA question types and sample questions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.628 + ], + "angle": 0, + "content": "for scene classification information. We incorporate the textual annotations directly into language prompts using the following template:" + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.637, + 0.434, + 0.65 + ], + "angle": 0, + "content": "Automatic answer generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.653, + 0.817, + 0.692 + ], + "angle": 0, + "content": "A video is showing a task [video level task name], specifically the part where [ASR caption]. Here is what we already know about the video: [existing question-answer pairs]. Answer this question in detail: [question]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.7, + 0.825, + 0.73 + ], + "angle": 0, + "content": "The off-the-shelf embeddings are incorporated into the PLM input via an additional Perceiver-IO[227] tokenizer, which summarizes the embeddings at the segment level." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.827, + 0.779 + ], + "angle": 0, + "content": "We fine-tune the answer generator on 1M manually annotated QA pairs. After fine-tuning, we deploy the trained answer generator with privileged information access on the unlabelled questions produced in the previous step, to produce automatic answers." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.367, + 0.806 + ], + "angle": 0, + "content": "G.1.4 Human Annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.825, + 0.845 + ], + "angle": 0, + "content": "After obtaining segments and generating questions and automatic answers, we employ human annotators to obtain high-quality answers. 
Our answer annotations include the following:" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "- Human-verified answers: Raters are provided with the model-generated answer and are asked to accept or reject the answer. They can reject questions for being irrelevant or unanswerable, and answers for being factually incorrect or lacking details. Accepted question-answer pairs proceed without changes, while rejected ones are handled differently:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.228, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "question-related rejections (irrelevant or unanswerable) are discarded, whereas answer-related rejections (factually incorrect or lacking details) are marked for correction in the next phase. \\(17.8\\%\\) of the total training samples are human-verified automatic answers." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.141, + 0.825, + 0.238 + ], + "angle": 0, + "content": "- Human annotated answers: Raters answer the questions from scratch by ensuring to cover all the relevant details within the temporal segment. They receive reference information, such as video-level task names and ASR captions, and may use online resources like WikiHow for additional context. Questions that cannot be answered based on the video segment (for example, due to some false premise) are rejected (with an explanation). These manually annotated answers make up \\(82.2\\%\\) of the PLM-FGQA training split, and \\(100\\%\\) of the evaluation set." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.25, + 0.827, + 0.363 + ], + "angle": 0, + "content": "Quality Control. Data quality is crucial for model success. 
We followed several strategies to monitor and enhance annotation quality: annotation Certification - we reviewed a small sample of annotations from each rater before they could work in production queues, ensuring that annotators met high-quality standards before advancing to production; golden Examples - annotators were provided with high-quality annotation examples, highlighting common error patterns and offering acceptable answers. targeted and Dual QA - we conducted daily audits, including vendor auditing and our own sampled quality control. In total, \\(13\\%\\) of the training set was audited, and \\(100\\%\\) of the samples in PLM-VideoBench underwent quality control." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.38, + 0.493, + 0.396 + ], + "angle": 0, + "content": "G.2 FGQA PLM-VideoBench Construction" + }, + { + "type": "table", + "bbox": [ + 0.342, + 0.416, + 0.653, + 0.584 + ], + "angle": 0, + "content": "
TrainTest
Sources stats
Total Videos767k3.6k
Unique Source Videos251k1.9
Average Duration (sec.)9.812.3
Annotations stats
Number of QA Pairs2.4M4.2k
Number Question Types1212
Question Length (avg/max)12/11412.3/56
Answer Length (avg/max)13.3/91114.1/62
Annotation TypeHumanHuman
Open-DomainYesYes
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.587, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Table 18: Statistics of the PLM-FGQA training and test data. The test split refers to the FGQA module of PLM-VideoBench." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.642, + 0.825, + 0.713 + ], + "angle": 0, + "content": "The FG-QA component of PLM-VideoBench is formed from a held-out portion of PLM-FGQA. We refine this set and transform it into a challenging MCQ-based benchmark by (1) generating MCQs, (2) filtering out samples that can be answered by text-only (blind) LLMs, (3) performing human verification of negatives, and (4) balancing the distribution of question types and domains. The statistics of the dataset are summarized in Table 18. In more detail the steps we followed are:" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.718, + 0.825, + 0.789 + ], + "angle": 0, + "content": "MCQ Generation: To transform QAs into challenging MCQs for evaluation, instead of generating random incorrect answers, we prompt LLMs to produce hard negatives that are semantically close to the correct answer. We use the following prompt which was designed to generate distractors that differ from the correct answer by only a single detail. In effect this enables evaluation to assess fine-grained reasoning about object attributes and tool distinctions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.794, + 0.825, + 0.851 + ], + "angle": 0, + "content": "Filtering Text-Only Answers: To ensure that video-based reasoning is required, we test whether a text-only LLM can answer the question correctly without seeing the video. If a question can be answered correctly from text alone, we remove or modify it to emphasize visual and temporal grounding." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Human Verification of Negatives: Automatically generated negatives may sometimes be factually true despite being labeled as incorrect. To address this, we perform human verification, where annotators review distractors to confirm that they are both plausible yet definitively incorrect given the video context.MCQs with ambiguous distractors are removed." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Balancing Question Types: Finally, after the above postprocessing and filtering is done, we rebalance the test set, to make sure that the question type and domain distributions are approximately uniform, by undersampling over-represented question types and domains." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Note on the evaluation metric. We report the multi-binary accuracy (MBAcc) [99] to evaluate on the FG-QA task. This accuracy is calculated by comparing the correct answer to each distractor individually. Specifically, for each question, we generate a series of binary questions, where the correct answer is compared with one distractor at a time. A prediction is considered correct only if the correct answer is consistently selected across all binary comparisons. We preferred this metric to vanilla MCQ accuracy as it greatly reduces the predictability of automatically-generated MCQs." 
+ }, + { + "type": "title", + "bbox": [ + 0.18, + 0.239, + 0.34, + 0.252 + ], + "angle": 0, + "content": "MCQ generation prompt" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.256, + 0.543, + 0.267 + ], + "angle": 0, + "content": "Here is a question and answer pair about a video:" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.269, + 0.281, + 0.28 + ], + "angle": 0, + "content": "Q: [question]" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.282, + 0.266, + 0.293 + ], + "angle": 0, + "content": "A: [answer]" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.293, + 0.816, + 0.457 + ], + "angle": 0, + "content": "You need to transform this into a high-quality multiple-choice question. To do this, first rephrase the given correct answer and then provide n distractor answers. The n incorrect answers should be reasonable and valid responses to the question, but should have a different meaning than the correct answer. You generate an incorrect answer from the correct one by changing a single detail, e.g. an object or verb/action that is relevant to what's being asked. Make the incorrect answers realistic, plausible and similar enough to the correct answer so that it is very difficult for someone to distinguish between them with prior knowledge alone. Finding the correct answer should also require visual information about the scene. The distractor answers should answer the question, but should be incorrect but in a non-obvious way. When changing a single detail to create the distractors, make sure that this detail is the main point of the question. For example, if the question is about the color of an object, then the distractor should change the color of the object and not the kind of object." 
+ }, + { + "type": "text", + "bbox": [ + 0.183, + 0.457, + 0.816, + 0.482 + ], + "angle": 0, + "content": "Here are some examples of good distractors (desired) and bad distractors (to be avoided):" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.483, + 0.684, + 0.495 + ], + "angle": 0, + "content": "Q: What is the person wearing on their hands while applying varnish?" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.495, + 0.815, + 0.519 + ], + "angle": 0, + "content": "A: The person is wearing white gloves on their hands while applying varnish with a brush." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.52, + 0.308, + 0.531 + ], + "angle": 0, + "content": "Good distractors:" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.532, + 0.812, + 0.556 + ], + "angle": 0, + "content": "- The person is wearing black gloves on their hands while applying varnish with a brush. Bad distractors:" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.558, + 0.812, + 0.582 + ], + "angle": 0, + "content": "- The person is wearing black gloves on their hands while applying paint with a roller. .. More examples & formatting ..." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.611, + 0.367, + 0.627 + ], + "angle": 0, + "content": "H PLM-STC Details" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.646, + 0.825, + 0.786 + ], + "angle": 0, + "content": "We present PLM Spatio-Temporal Captions (PLM-STC), a novel dataset aimed at training and evaluating VLMs for spatial-temporal reasoning. We collected pairs of mask tablets for objects in videos, along with their corresponding detailed temporal descriptions. The annotations are collected on top of the SA-V [124] videos, which are diverse and high-quality. We excluded the test set videos from SA-V, to avoid any data cross contamination. Table 20 provides statistics about the dataset, such as number of total samples, training/val/test splits, object types, and time-segment duration. 
PLM-STC, is not only novel, but also larger and higher quality compared to existing datasets, see Table 19. In Fig. 5 (right), we show an example of our spatio-temporal captions, describing a little girl (highlighted in blue): (frame 0-81): A little girl moves back as beluga whale approaches her face. (frame 82-85): Out of frame. (frame 86-98): She tries to feed the whale." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.82 + ], + "angle": 0, + "content": "We describe the overall annotation process in Appendix H.1, and how we build the three sub-tasks in Appendix H.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.355, + 0.856 + ], + "angle": 0, + "content": "H.1 Annotation Process" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.911 + ], + "angle": 0, + "content": "The annotation process is summarized in Figure 14. The annotation process involves three stages: Object Selection and Tracking, Temporal Segmentation and Captioning and Verification and Quality Control." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.242, + 0.089, + 0.758, + 0.235 + ], + "angle": 0, + "content": "
DatasetSpatial TypeYear#VideosRegionsTemp. Seg.Captions?
DAVIS16-RVOS [228]Segmentation20185050-No
DAVIS17-RVOS [229]Segmentation201890205-No
YouCook2-BB [83]BBox2018647-4.3KNo
A2D Sentence [230]Segmentation20183.7K4.8K-No
J-HMDB Sentence [231]Segmentation2018928928-No
ActivityNet Entities [232]BBox201914.3K1.5M52KNo
VidSTG [9]BBox20206.9K44.8K-No
Refer-Youtube-VOS [233]Segmentation20203.9K7.5K-No
HC-STVG [234]BBox202116K16K-No
VLN [123]Mouse Trace202350K43.1K43.1KYes
MeVis [235]Segmentation20232K8.8K-No
PLM-STCSegmentation202545.7K122.3K194.2KYes
" + }, + { + "type": "table_caption", + "bbox": [ + 0.288, + 0.241, + 0.71, + 0.257 + ], + "angle": 0, + "content": "Table 19: Spatio-Temporal-Captioning datasets comparison." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.279, + 0.825, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.349, + 0.34, + 0.647, + 0.355 + ], + "angle": 0, + "content": "Figure 14: PLM-STC Annotation pipeline." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.391, + 0.442, + 0.407 + ], + "angle": 0, + "content": "H.1.1 Object Selection and Tracking" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.418, + 0.828, + 0.584 + ], + "angle": 0, + "content": "Annotators select interesting objects with significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. We instructed the annotators by defining interesting regions in video footage as those with the presence of significant, dynamic actions performed by subjects, which can be human, animal, or object. These regions involve multiple major actions that evolve over time, rather than static or insignificant actions. We provided annotators with examples of interesting regions, such as one featuring a person making a sandwich, a dog chasing a cat, or a kite getting stuck in a tree. The goal for the annotator is to identify regions with high delta, where the subject performs a sequence of significant activities that change over time, such as a person entering a room, sitting down, and then drinking from a glass. By focusing on these dynamic and evolving actions, annotators can effectively select regions worthy of captioning. Finally, annotators are provided with several examples of good and bad annotations." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.51, + 0.625 + ], + "angle": 0, + "content": "H.1.2 Temporal Segmentation and Captioning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.827, + 0.749 + ], + "angle": 0, + "content": "Based on the selected mask tablets, another set of annotators provides time segments for each action and fills in the caption within each time segment. The annotators are instructed to focus on capturing major actions, avoiding minor details or unnecessary movements. When writing captions for each segment, they must ensure clarity in describing the subject's movements and directionality. Additionally, the annotators are advised to avoid making assumptions about the subject's actions or adding details not clearly visible, sticking only to what is directly observable in the frame. As in the previous task, the annotators are provided with several examples of good and bad annotations to guide their work." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.773, + 0.457, + 0.789 + ], + "angle": 0, + "content": "H.1.3 Verification and Quality Control" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.825, + 0.913 + ], + "angle": 0, + "content": "A final set of annotators manually verifies the tablets and time-segment captions to ensure accuracy and consistency. For mask refinement, we re-run the same pipeline as §H.1.1, while not letting the annotators choose the interesting object, but only refine the quality of the mask. 
For captioning refinement, the annotators are tasked with three objectives: 1) Redundancy: eliminate any repeating or redundant information to ensure the caption is concise; 2) Accuracy: verify that every word in the caption accurately describes a fact present in the video, correcting or removing any incorrect information; and 3) Actions: add missing major action information to the caption while preserving existing atomic actions, ensuring the caption effectively conveys the key events in the video." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.288, + 0.089, + 0.707, + 0.305 + ], + "angle": 0, + "content": "
AllTrainValTest
Dataset stats
Number of Videos45.2K42.0K8042.3K
Spatio Temporal Caption127.8K---
Temporal Caption198.7K---
Tube's categories
Person104.5K99.6K8612.4K
Animal16.8K13.2K5501.7K
Object/things6.4K4.4K4361.2K
Temporal captions per Tube
1 caption per tube78.9K73.9K8422.4K
2 caption per tube30.9K27.8K5661.7K
3 or more Caption per tube16.38K14.15K4211.2K
Tasks stats
Region Detailed Captioning (RDCap)122.3K117.2K2.5K2.6K
Region Captioning (RCap)194.2K179.5K4.6K10.1K
Region Temporal Localization (RTLoc)192.0K179.5K4.6K7.9K
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.307, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Table 20: PLM-STC dataset statistics. Note the for RTLoc, we filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.388, + 0.38, + 0.403 + ], + "angle": 0, + "content": "H.2 PLM-STC Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.825, + 0.485 + ], + "angle": 0, + "content": "We utilize the collected data to train and evaluate the PLM on three challenging tasks that are essential for video perception. Firstly, we created a balanced validation and test split by the combination of tube categories and number of caption per tube while making sure no video overlaps with the training set. This is done to make sure we evaluate all the categories presents in the dataset equally. Then, we process the data for each task:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.825, + 0.547 + ], + "angle": 0, + "content": "- Dense Video Region Captioning (RDCap). This comprehensive task combines both \"what\" and \"when\" aspects. The model takes the video and the tubelets as input and outputs the full time-segment captions. We also assign an out of frame caption to temporal segments for which the subject does not appear in the video to ensure dense temporal coverage of events across the video duration." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.553, + 0.827, + 0.609 + ], + "angle": 0, + "content": "Video Region Captioning (RCap). This task involves describing \"what\" activities are performed within a specific time frame by the objects in the tubelets. The model receives the video, the tubelets, and the temporal region as input and outputs the corresponding captions. 
We filter out events that refer to the subject when it is out-of-frame to avoid evaluating trivial captions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.827, + 0.713 + ], + "angle": 0, + "content": "Region Temporal Localization (RTLoc). This task requires the model to localize \"when\" specific events occur in relation to a given tubelet. The input includes the video, the tubelet, and the caption, while the output is the start and end frames indicating when the captioned event occurs. Like RCap, we filter out out-of-frame events, as well as ambiguous events that may be localized to multiple time segments. For example, if the subject opens the door twice, the event text is guaranteed to be unique (e.g., referring to the first and second time they opened the door) or dropped entirely if ambiguous (e.g., if the text only mentions the action)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.827, + 0.761 + ], + "angle": 0, + "content": "These tasks are designed to both improve and evaluate the model's capabilities, with the same input-output format applied during both training and evaluation. Figure 6 illustrate an examples of the task, including the prompt used to train and evaluate the PLM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.783, + 0.37, + 0.799 + ], + "angle": 0, + "content": "I Smart Glasses Data" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.816, + 0.426, + 0.83 + ], + "angle": 0, + "content": "I.1 Data collection and annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.827, + 0.914 + ], + "angle": 0, + "content": "We collected the source videos for PLM-SGQA using commercial smart glasses, which enable participants to capture egocentric videos in a hands-free manner. 
Participants are presented with 14 categories of popular scenarios, such as shopping, cooking, and walking in a neighborhood, and are instructed to ask questions about their surroundings as if interacting with a multi-modal assistant that shares their visual perspective. Specifically, participants are asked to ask questions spontaneously," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.149 + ], + "angle": 0, + "content": "without delay, about the things they see and experience, and to focus on visual queries rather than dynamic information that may change regularly. After recording the videos, participants annotate the segments by marking the start and end points of the video relevant to each question, as well as providing the ground-truth answer." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.164, + 0.345, + 0.179 + ], + "angle": 0, + "content": "I.2 SGQA Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.189, + 0.827, + 0.314 + ], + "angle": 0, + "content": "To create the SGQA component of PLM-VideoBench we first filtered the Q&As using an LLM to obtain a shortlist of questions that focus on human activity and also are perception-based rather than based on general knowledge. This means that SGQA focus on questions that require good visual understanding of the scene to be accurately answered. This process yields an evaluation set consisting of 655 Q&As. For the resulting Q&As, we then trimmed the original videos to obtain clips within the temporal boundary that the human wearer/annotator specified. As the annotated segments end at the point where the smart-glass wearer asks the question, it is important for all evaluations to specify that the question refers to the end of the video clip - e.g. see the prompt we used for PLM and baselines evaluation in 10. 
We summarize the statistics of the SGQA test set in Figures 15 and 16." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.348, + 0.416, + 0.462 + ], + "angle": 0, + "content": "
Sources stats
Total Videos663
Average Duration (sec.)29.4
Annotations stats
Number of QA Pairs665
Number Domains14
Question Length (avg/max)9.0 / 52
Answer Length (avg/max)21.6 / 40
Annotation TypeHuman
Open-DomainYes
" + }, + { + "type": "table_caption", + "bbox": [ + 0.206, + 0.468, + 0.419, + 0.496 + ], + "angle": 0, + "content": "Figure 15: Statistics of the PLMSGQA test data." + }, + { + "type": "image", + "bbox": [ + 0.424, + 0.334, + 0.787, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.416, + 0.489, + 0.793, + 0.518 + ], + "angle": 0, + "content": "Figure 16: Domain distribution of video-clips in PLMSGQA." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.521, + 0.395, + 0.539 + ], + "angle": 0, + "content": "J Synthetic Data Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.825, + 0.595 + ], + "angle": 0, + "content": "Our data engine targets base capabilities of VLMs: image captioning, visual question answering, OCR, chart/diagram understanding, and video understanding. We developed different pipelines for images and videos, and includes different levels of metadata to generate captions and QAs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.825, + 0.63 + ], + "angle": 0, + "content": "Image Captions: We caption high-quality images using Llama 3.1V 90B. An example is shown in Figure 17. We use this pipeline to caption SA1B [105], Object365 [135], and OpenImages [136]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.825, + 0.678 + ], + "angle": 0, + "content": "OCR QAs: We leverage pre-extracted OCR and use it as input for a LLM (i.e., Llama 3.3 70B) to generate a set of five question-answer pairs. An example is shown in Figure 18. We use this pipeline to generate QAs for PDFAcc [132], and UCSF [133]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.683, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Image Captioning plus QAs: In cases for which OCR does not provide enough information to create questions (e.g., scientific figures), we further caption the image using Llama 3.1V 90B. 
Then we pass the caption with auxiliary metadata (e.g., OCR) to a LLM (i.e., Llama 3.3 70B) to generate question-answers pairs. An example is shown in Figure 19). We use this pipeline to generate captions and QAs for ArxivQA [59], DocVQA [53], InfoVQA [56] and Ai2d [55]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.827, + 0.843 + ], + "angle": 0, + "content": "Video Captioning plus QAs: An image captioner is run on key-frames of the video, as well as a video captioner on the overall video at 1 fps. The result captions are passed to a LLM (i.e., Llama 3.3 70B, or Llama 3 405B) with additional metadata (e.g., video title etc.), so to generate a detailed caption and a multiple-chosen question answers pair. An example is shown in Figure 20). We use this pipeline to generate captions and QAs for YT-1B [236], Ego4d [115] \\(^{3}\\), DiDeMo [140], Charades [139], and Kinetics710 [121] \\(^{4}\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.871, + 0.59, + 0.884 + ], + "angle": 0, + "content": "3For this dataset we used Llama3 405B, rather than Llama 3.3 70B" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.911 + ], + "angle": 0, + "content": "4DiDeMo, Charades, Kinetics710 used a simpler pipeline where only frame captions were used, and a smaller scale LLM (i.e., Llama 3.1 8B)" + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.871, + 0.825, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.566, + 0.105, + 0.687, + 0.119 + ], + "angle": 0, + "content": "Detailed Caption" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.146, + 0.404, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.124, + 0.845, + 0.3 + ], + "angle": 0, + "content": "The image depicts a yellow building with a red door and a sign 
that reads \"YOLCU SALONU\" in black letters. The building has a gable roof and a small window on the left side. There are two trees in front of the building, one on each side of the entrance. The ground is wet, and there is snow on the trees and the ground. In the background, there is a train station with a train on the tracks. The sky is gray and overcast. The image appears to be a photograph taken on a cold winter day. The presence of snow and the gray sky suggest that it is a winter scene. The building and the train station in the background suggest that it is a public place, possibly a train station or a bus stop. The sign on the building reads \"YOLCU SALONU,\" which is Turkish for \"Passenger Lounge.\" This suggests that the building is a waiting area for passengers. Overall, the image depicts a quiet and peaceful winter scene, with a focus on the building and the surrounding environment." + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.307, + 0.701, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.353, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Figure 17: Detailed caption example, and the corresponding pipeline. The captioner (i.e., Llama 3V 90B) is prompted to generate the caption for the provided image." + }, + { + "type": "title", + "bbox": [ + 0.656, + 0.417, + 0.696, + 0.43 + ], + "angle": 0, + "content": "OCR" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.436, + 0.845, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.444, + 0.605, + 0.574, + 0.618 + ], + "angle": 0, + "content": "Question Answers" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.624, + 0.436, + 0.637 + ], + "angle": 0, + "content": "Q: How many teams qualify for the finals?" 
+ }, + { + "type": "text", + "bbox": [ + 0.184, + 0.637, + 0.834, + 0.66 + ], + "angle": 0, + "content": "A: The top 3 teams from each section, plus the next two highest teams on points, qualify for the finals, making a total of 5 teams." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.662, + 0.49, + 0.675 + ], + "angle": 0, + "content": "Q: How many points has the Buderim team earned?" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.675, + 0.449, + 0.687 + ], + "angle": 0, + "content": "A: The Buderim team has earned 150 points." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.687, + 0.419, + 0.699 + ], + "angle": 0, + "content": "Q: Which team is currently in the lead?" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.7, + 0.64, + 0.712 + ], + "angle": 0, + "content": "A: The Albert team is currently in the lead with 36 points and a margin of 39." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.713, + 0.513, + 0.724 + ], + "angle": 0, + "content": "Q: What is the current margin of the Mooloolaba team?" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.725, + 0.487, + 0.737 + ], + "angle": 0, + "content": "A: The Mooloolaba team has a current margin of 1." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.737, + 0.48, + 0.749 + ], + "angle": 0, + "content": "Q: What is the overall summary of the document?" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.75, + 0.834, + 0.788 + ], + "angle": 0, + "content": "A: The document appears to be a standings table for the Ladies Division Three of a bowling or lawn bowls competition, showing the current standings of various teams, including their win-loss records, points, and margins." 
+ }, + { + "type": "list", + "bbox": [ + 0.183, + 0.624, + 0.834, + 0.788 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.479, + 0.802, + 0.538, + 0.816 + ], + "angle": 0, + "content": "Pipeline" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.822, + 0.703, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.898 + ], + "angle": 0, + "content": "Figure 18: Visual Question Answering pairs and the corresponding pipeline. The OCR text is extracted from the image, and passed to the LLM (i.e., Llama 3.3 70B) to generate QA pairs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.656, + 0.211, + 0.696, + 0.224 + ], + "angle": 0, + "content": "OCR" + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.23, + 0.437, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.282, + 0.844, + 0.31 + ], + "angle": 0, + "content": "SB 1011 10 Fraction of objects 0.2 0.1 0.0 0.01 0.1 1.0 10.0 erg s1" + }, + { + "type": "title", + "bbox": [ + 0.447, + 0.366, + 0.57, + 0.38 + ], + "angle": 0, + "content": "Detailed Caption" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.386, + 0.837, + 0.526 + ], + "angle": 0, + "content": "The image depicts a histogram of the distribution of objects, with the x-axis labeled \"LSB[1046ergs-1]\" and the y-axis labeled \"Fraction of objects.\" The x-axis ranges from 0.01 to 10.0, while the y-axis ranges from 0.0 to 0.2. The histogram is divided into bins of varying widths, with the first bin spanning from 0.01 to 0.1, the second bin spanning from 0.1 to 1.0, and so on. Each bin contains a bar representing the fraction of objects within that range. The bars are shaded in gray, with some bins having multiple bars. 
A key feature of the histogram is the presence of a peak in the middle bin, which corresponds to an \\(L_{SB}\\) value of around 1.0. This suggests that the majority of objects have an \\(L_{SB}\\) value close to this value. The histogram also shows a tail extending towards higher \\(L_{SB}\\) values, indicating that some objects have significantly higher \\(L_{SB}\\) values than the majority. Overall, the histogram provides a visual representation of the distribution of \\(L_{SB}\\) values among the objects being studied. It allows for easy identification of patterns and trends in the data, such as the peak in the middle bin and the tail towards higher \\(L_{SB}\\) values." + }, + { + "type": "title", + "bbox": [ + 0.365, + 0.53, + 0.652, + 0.545 + ], + "angle": 0, + "content": "Multi-Choice Question Answer (MCQA)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.549, + 0.687, + 0.563 + ], + "angle": 0, + "content": "What is the approximate \\( L_{SB} \\) value at which the majority of objects have their peak?" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.563, + 0.236, + 0.574 + ], + "angle": 0, + "content": "Options:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.576, + 0.228, + 0.587 + ], + "angle": 0, + "content": "(A) 0.1" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.588, + 0.228, + 0.599 + ], + "angle": 0, + "content": "(B) 1.0" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.6, + 0.228, + 0.611 + ], + "angle": 0, + "content": "(C) 5.0" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.612, + 0.236, + 0.624 + ], + "angle": 0, + "content": "(D) 10.0" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.626, + 0.285, + 0.637 + ], + "angle": 0, + "content": "Answer: (B) 1.0." 
+ }, + { + "type": "list", + "bbox": [ + 0.184, + 0.563, + 0.285, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.478, + 0.642, + 0.539, + 0.656 + ], + "angle": 0, + "content": "Pipeline" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.661, + 0.803, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.748, + 0.828, + 0.79 + ], + "angle": 0, + "content": "Figure 19: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. The OCR text is extracted from the image, and the caption is generated by the captioner (i.e., Llama 3V 90B), which are all passed to the LLM (i.e., Llama 3.3 70B) to generate MCQAs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.117, + 0.835, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.473, + 0.193, + 0.545, + 0.206 + ], + "angle": 0, + "content": "Metadata" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.212, + 0.515, + 0.225 + ], + "angle": 0, + "content": "Title: Lions VS Colts Highlights 2017 Preseason Game" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.225, + 0.568, + 0.239 + ], + "angle": 0, + "content": "Description: Comment suggestions for future videos and Enjoy!" + }, + { + "type": "title", + "bbox": [ + 0.453, + 0.242, + 0.564, + 0.257 + ], + "angle": 0, + "content": "Frame Caption" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.261, + 0.836, + 0.324 + ], + "angle": 0, + "content": "Frame 435: The image shows a man with dreadlocks standing in front of a crowd of people in a stadium. He is wearing a white t-shirt and is surrounded by a group of people standing on the ground. On the left side of the image, there is a table fan, bottles, and other objects placed on a table. 
In the background, there are people sitting on chairs, stairs, railings, boards with text, lights, and the sky. The text on the boards reads \"Indianapolis Colts vs San Francisco 49ers" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.324, + 0.834, + 0.362 + ], + "angle": 0, + "content": "Frame 585: The image shows a football game being played on a TV screen, with a group of people on the ground and a few people standing in the background. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.362, + 0.834, + 0.4 + ], + "angle": 0, + "content": "Frame 765: The image shows a group of people playing a game of football on a green field, with white lines marking the boundaries of the field. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.4, + 0.834, + 0.438 + ], + "angle": 0, + "content": "Frame 945: The image shows a football game being played on a TV screen, with people wearing helmets and playing on the ground. At the bottom of the image, there is text and numbers indicating that the game is between the Detroit Lions and the Indianapolis Colts." + }, + { + "type": "title", + "bbox": [ + 0.456, + 0.442, + 0.561, + 0.457 + ], + "angle": 0, + "content": "Video Caption" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.461, + 0.836, + 0.486 + ], + "angle": 0, + "content": "Football players wearing helmets, Detroit Lions vs Indianapolis Colts, player running with ball, falls down, touchdown scored." 
+ }, + { + "type": "title", + "bbox": [ + 0.447, + 0.489, + 0.57, + 0.504 + ], + "angle": 0, + "content": "Detailed Caption" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.508, + 0.836, + 0.597 + ], + "angle": 0, + "content": "A football player is running with the ball and then falls down, the game is between the Detroit Lions and the Indianapolis Colts, with players wearing blue and white uniforms and helmets, and people sitting in the stadium, a watermark on the image shows the teams playing, one player is laying on the ground next to another player wearing a white and blue jersey and a white helmet, a horseshoe-shaped object is on the grass, text on the image indicates a touchdown, players are wearing helmets and playing on the ground, with watermarks in the bottom left corner of the images, the game involves the Detroit Lions and the Indianapolis Colts, and also shows the Colts playing against the New England Patriots and the Lions playing against the Dallas Cowboys." + }, + { + "type": "title", + "bbox": [ + 0.365, + 0.601, + 0.652, + 0.616 + ], + "angle": 0, + "content": "Multi-Choice Question Answer (MCQA)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.62, + 0.49, + 0.634 + ], + "angle": 0, + "content": "What is the outcome of the football player's action?" 
+ }, + { + "type": "text", + "bbox": [ + 0.184, + 0.634, + 0.236, + 0.646 + ], + "angle": 0, + "content": "Options:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.647, + 0.382, + 0.659 + ], + "angle": 0, + "content": "(A) the player runs out of bounds" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.659, + 0.386, + 0.671 + ], + "angle": 0, + "content": "(B) the player scores a touchdown" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.671, + 0.351, + 0.683 + ], + "angle": 0, + "content": "(C) the player drops the ball" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.684, + 0.333, + 0.696 + ], + "angle": 0, + "content": "(D) the player falls down" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.696, + 0.339, + 0.709 + ], + "angle": 0, + "content": "(E) the player gets injured" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.709, + 0.443, + 0.721 + ], + "angle": 0, + "content": "Answer: (B) the player scores a touchdown." + }, + { + "type": "list", + "bbox": [ + 0.184, + 0.647, + 0.443, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.478, + 0.726, + 0.539, + 0.74 + ], + "angle": 0, + "content": "Pipeline" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.746, + 0.803, + 0.819 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.884 + ], + "angle": 0, + "content": "Figure 20: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. An LLM (i.e., Llama 3.3 70B) with a video caption, frame captions at 1 fps, and additional metadata (e.g., video title etc.), generates a detailed caption and multiple-choices question answers pair." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.372, + 0.108 + ], + "angle": 0, + "content": "K Qualitative Results" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.164, + 0.381, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.131, + 0.743, + 0.145 + ], + "angle": 0, + "content": "Question: Provide a detailed description of the image." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.145, + 0.845, + 0.195 + ], + "angle": 0, + "content": "Answer: The image is a pink postage stamp featuring a map of the Grenadines of St. Vincent, with a red border and black text. The map shows various locations on the island, including hotels, bays, and other points of interest." + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.195, + 0.516, + 0.208 + ], + "angle": 0, + "content": "*Key Features:*" + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.219, + 0.844, + 0.244 + ], + "angle": 0, + "content": "* *Map:* A pink map of the Grenadines of St. Vincent, with a red border and black text." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.245, + 0.844, + 0.27 + ], + "angle": 0, + "content": "* *Locations:* Various locations on the island are marked, including hotels, bays, and other points of interest." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.27, + 0.801, + 0.283 + ], + "angle": 0, + "content": "* *Scale:* A scale in miles is provided at the bottom of the map." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.283, + 0.844, + 0.308 + ], + "angle": 0, + "content": "* *Denomination:* The denomination \"30c\" is printed in the bottom-right corner of the stamp." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.308, + 0.779, + 0.321 + ], + "angle": 0, + "content": "* *Border:* The stamp has a red border with a serrated edge." 
+ }, + { + "type": "list", + "bbox": [ + 0.418, + 0.219, + 0.844, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.332, + 0.553, + 0.346 + ], + "angle": 0, + "content": "*Overall Impression:*" + }, + { + "type": "text", + "bbox": [ + 0.417, + 0.358, + 0.845, + 0.409 + ], + "angle": 0, + "content": "The image appears to be a vintage postage stamp from the Grenadines of St. Vincent, featuring a map of the island and various locations of interest. The stamp has a classic design and is likely from the mid-20th century." + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.419, + 0.62, + 0.435 + ], + "angle": 0, + "content": "Figure 21: PLM-8B OCR examples." + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.457, + 0.49, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.47, + 0.672, + 0.508 + ], + "angle": 0, + "content": "Question: What does the word say together from the bikers?" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.509, + 0.674, + 0.535 + ], + "angle": 0, + "content": "Answer: The word says Indianapolis." + }, + { + "type": "image_caption", + "bbox": [ + 0.34, + 0.555, + 0.657, + 0.571 + ], + "angle": 0, + "content": "Figure 22: PLM-8B OCR in the wild examples." + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.597, + 0.451, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.624, + 0.674, + 0.65 + ], + "angle": 0, + "content": "Question: How tall is the. \nkitty character in the image?" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.65, + 0.674, + 0.687 + ], + "angle": 0, + "content": "Answer: The kitty character is approximately \\(100\\mathrm{cm}\\) tall." + }, + { + "type": "image_caption", + "bbox": [ + 0.34, + 0.723, + 0.657, + 0.739 + ], + "angle": 0, + "content": "Figure 23: PLM-8B OCR in the wild examples." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.342, + 0.18, + 0.476, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.178, + 0.672, + 0.203 + ], + "angle": 0, + "content": "Question: What is inside the oven?" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.204, + 0.672, + 0.228 + ], + "angle": 0, + "content": "Answer: There is a baking sheet inside the oven." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.229, + 0.675, + 0.253 + ], + "angle": 0, + "content": "Question: What temperature is the oven set to?" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.254, + 0.672, + 0.279 + ], + "angle": 0, + "content": "Answer: The oven is set to 200 degrees." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.279, + 0.672, + 0.304 + ], + "angle": 0, + "content": "Question: What unit is the oven's temperature set to?" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.305, + 0.675, + 0.329 + ], + "angle": 0, + "content": "Answer: The oven's temperature is set to Celsius." + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.349, + 0.629, + 0.366 + ], + "angle": 0, + "content": "Figure 24: PLM-8B Natural image QA" + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.549, + 0.513, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.554, + 0.554, + 0.723, + 0.579 + ], + "angle": 0, + "content": "Question: Describe the image in details" + }, + { + "type": "text", + "bbox": [ + 0.554, + 0.58, + 0.723, + 0.793 + ], + "angle": 0, + "content": "Answer: A city skyline with a large body of water in the foreground. The city skyline is made up of tall buildings, some of which are skyscrapers. The buildings are densely packed together and vary in height and color. In the foreground, there is a large body of water that appears to be a harbor or bay. 
There are many boats and ships in the water, some of which are docked at the shore while others are sailing or anchored in the middle of the water." + }, + { + "type": "image_caption", + "bbox": [ + 0.39, + 0.808, + 0.607, + 0.824 + ], + "angle": 0, + "content": "Figure 25: PLM-8B Captioning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.178, + 0.835, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.482, + 0.333, + 0.536, + 0.348 + ], + "angle": 0, + "content": "RDCap" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.352, + 0.836, + 0.391 + ], + "angle": 0, + "content": "Question: Describe the subject's actions within the red rectangles, providing start and end frames ids and a brief description for each action in a dense caption format. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.391, + 0.401, + 0.403 + ], + "angle": 0, + "content": "Answer: Frame [0, 6]: Out of frame" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.403, + 0.835, + 0.429 + ], + "angle": 0, + "content": "Frame [6, 15]: A woman is walking towards the camera with a child. She is holding the child's hand with her right hand and carrying a bag in her left hand" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.429, + 0.362, + 0.441 + ], + "angle": 0, + "content": "Frame [16, 31]: Out of frame." + }, + { + "type": "title", + "bbox": [ + 0.488, + 0.468, + 0.531, + 0.483 + ], + "angle": 0, + "content": "RCap" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.487, + 0.834, + 0.512 + ], + "angle": 0, + "content": "Question: What is happening to the subject in the region highlighted by the red rectangle during frames [6, 15] in this 32 frame video?" 
+ }, + { + "type": "text", + "bbox": [ + 0.184, + 0.513, + 0.835, + 0.539 + ], + "angle": 0, + "content": "Answer: a woman is walking on a muddy road with a child. she is holding the child's hand with her right hand and adjusting her saree with her left hand while walking" + }, + { + "type": "title", + "bbox": [ + 0.483, + 0.543, + 0.535, + 0.557 + ], + "angle": 0, + "content": "RTLoc" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.561, + 0.835, + 0.613 + ], + "angle": 0, + "content": "Question: Please provide the start and end frame of when 'an individual standing near a water body is drinking a from a water from a bottle' happens corresponding to the region marked by the blue rectangle. Use the format [start, end] where start and end are frame numbers between 0 and 31 in this 32 frame video. Answer: [23, 26]" + }, + { + "type": "title", + "bbox": [ + 0.468, + 0.616, + 0.551, + 0.631 + ], + "angle": 0, + "content": "Captioning" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.635, + 0.576, + 0.649 + ], + "angle": 0, + "content": "Question: Please describe the video in detail, focusing on actions." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.649, + 0.835, + 0.711 + ], + "angle": 0, + "content": "Answer: This video features a group of people walking around a muddy area with a small bridge in the background. The video is taken outdoors and the weather is sunny. The video starts with a man standing on the left side of the frame, facing the camera. He is wearing a white shirt, a black vest, black trousers and white shoes. He has a scarf around his neck. The camera pans to the right and shows a group of people walking around. The video is taken from a low angle and the camera is handheld." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.723, + 0.836, + 0.75 + ], + "angle": 0, + "content": "The video ends with a man walking towards the left side of the frame. He is wearing a blue shirt, black trousers and brown shoes. 
He is holding a bottle in his right hand and is drinking from it." + }, + { + "type": "title", + "bbox": [ + 0.448, + 0.754, + 0.57, + 0.769 + ], + "angle": 0, + "content": "Fine-grained QA" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.772, + 0.658, + 0.786 + ], + "angle": 0, + "content": "Question: Please explain what the man in blue t-shirt is pointing at in the video." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.786, + 0.524, + 0.799 + ], + "angle": 0, + "content": "Answer: The man in blue t-shirt is pointing at the bridge." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.808, + 0.763, + 0.824 + ], + "angle": 0, + "content": "Figure 26: PLM-8B Video Region Captioning, Grounding and Fine-grained QA." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.208, + 0.835, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.285, + 0.684, + 0.298 + ], + "angle": 0, + "content": "Question: Please give me a details of the video focusing on actions and interactions." + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.298, + 0.834, + 0.374 + ], + "angle": 0, + "content": "Answer: This video features a close up shot of a hand holding a plush toy mouse with a grey kitten standing on a white chair, reaching out and touching the toy with its paws. In the beginning of the video, the kitten is seen standing on the chair and reaching out with both paws to touch the toy mouse. The kitten then withdraws its paws and sits back on the chair. The hand holding the toy mouse is seen moving the toy closer to the kitten and then away from the kitten. The kitten then reaches out again and touches the toy with its paws. The hand then moves the toy away from the kitten again and the kitten looks to the left of the frame." 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.385, + 0.45, + 0.399 + ], + "angle": 0, + "content": "This video is set against a white background." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.285, + 0.834, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.408, + 0.626, + 0.424 + ], + "angle": 0, + "content": "Figure 27: PLM-8B Video Captioning" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.666, + 0.835, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.742, + 0.57, + 0.756 + ], + "angle": 0, + "content": "Question: What are the ingredients being thrown into the mixer?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.182, + 0.756, + 0.414, + 0.769 + ], + "angle": 0, + "content": "Answer: Carrots, apples, and bananas." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.742, + 0.57, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.395, + 0.778, + 0.602, + 0.794 + ], + "angle": 0, + "content": "Figure 28: PLM-8B Video QA" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.459, + 0.108 + ], + "angle": 0, + "content": "L Limitations and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.827, + 0.205 + ], + "angle": 0, + "content": "Our PLM models achieve strong performance against open-data baselines and proprietary models alike, however there is still room for improvement in both modeling and data. On the model front, we do not experiment extensively with long video modeling components (e.g., token compression, dynamic temporal resolution). As a result, our performance on long video benchmarks [92, 94, 96] is less competitive (see Table F). 
PLM is compatible with such newer advancements and can be incorporated in future work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.828, + 0.309 + ], + "angle": 0, + "content": "Additionally, our results are sensitive to the characteristics of the base LLM. We see especially low performance of PLM on benchmarks such as MMMU [37], MME [41] and Video-MME [75] (see Tables 3 and 4), where the strongest baselines often rely on LLMs that are more verbose, but also have a likely much larger language component (see the gap to proprietary models on some benchmarks). We also note that our model performs relatively poorly on our SGQA task (Table 5), targeting a mix of perception and knowledge based questions to smart glasses. Strong chatbot-focused systems like GPT-4o excel at tasks that go beyond core perception." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.314, + 0.828, + 0.358 + ], + "angle": 0, + "content": "On the data front, our mix focuses squarely on visual perception — it does not include for example, multi-step reasoning, robotics or world-knowledge data. Despite these limitations, PLM contributes new capabilities and strong benchmark results, and set a new standard for fully reproducible VLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.376, + 0.351, + 0.394 + ], + "angle": 0, + "content": "M Broader Impact" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.827, + 0.518 + ], + "angle": 0, + "content": "Our work aims to advance open and reproducible research in vision-language modeling by releasing models, data, and benchmarks that support open research. By not having any distillation from proprietary models, we hope to improve reproducible and transparent training and evaluation of VLM research. However, like all MLLMs, our Perception Language Model (PLM) may have some risks. 
Even by carefully selecting datasets and apply several mitigation (CSAM, NSFW, etc.), the model may still contain hidden biases or generate inappropriate or harmful content. We took steps to reduce these risks by teaching the model to refuse answering questions related to bias, harassment, or adult content. We also remove all samples containing any mention of human faces from all the datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.826, + 0.595 + ], + "angle": 0, + "content": "We also annotate and release a large-scale dataset for fine-grained video question answering and spatio-temporal grounding. This release has the potential to significantly advance research in image and video understanding. Making the dataset openly available allows others to reproduce our work and invites broader community involvement. This transparency supports safer and more accountable progress, helping researchers better understand and address potential biases or limitations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.827, + 0.63 + ], + "angle": 0, + "content": "We believe that by openly sharing our models and data, while actively addressing ethical concerns, our work can contribute positively to vision-language research." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.113, + 0.826, + 0.141 + ], + "angle": 0, + "content": "[1] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.152, + 0.826, + 0.192 + ], + "angle": 0, + "content": "[2] Erfei Cui, Yinan He, Zheng Ma, Zhe Chen, Hao Tian, Weiyun Wang, Kunchang Li, Yi Wang, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, Yali Wang, Limin Wang, Yu Qiao, and Jifeng Dai. Sharegpt-4o: Comprehensive multimodal annotations with gpt-4o, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.204, + 0.826, + 0.244 + ], + "angle": 0, + "content": "[3] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. In European Conference on Computer Vision, pages 370-387. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.255, + 0.826, + 0.283 + ], + "angle": 0, + "content": "[4] Farre Miquel, Marafioti Andres, Tunstall Lewis, von Werra Leandro, Conghui He, Cuenca Pedro, and Wolf Thomas. Finevideo: behind the scenes, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.295, + 0.826, + 0.322 + ], + "angle": 0, + "content": "[5] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.333, + 0.826, + 0.384 + ], + "angle": 0, + "content": "[6] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, Li Yuan, Yu Qiao, Dahua Lin, Feng Zhao, and Jiaqi Wang. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.397, + 0.826, + 0.425 + ], + "angle": 0, + "content": "[7] Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle-2: Faster inference of language models with dynamic draft trees, 2024b. URL https://arxiv.org/abs/2406.16858, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.436, + 0.826, + 0.465 + ], + "angle": 0, + "content": "[8] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. Hero: Hierarchical encoder for video+ language omni-representation pre-training. arXiv preprint arXiv:2005.00200, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.476, + 0.826, + 0.515 + ], + "angle": 0, + "content": "[9] Zhu Zhang, Zhou Zhao, Yang Zhao, Qi Wang, Huasheng Liu, and Lianli Gao. Where does it exist: Spatio-temporal video grounding for multi-form sentences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10668-10677, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.527, + 0.826, + 0.567 + ], + "angle": 0, + "content": "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.578, + 0.826, + 0.706 + ], + "angle": 0, + "content": "[11] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Jen Dumas, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weis, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. 
Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.718, + 0.826, + 0.746 + ], + "angle": 0, + "content": "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava- next: Improved reasoning,OCR,and world knowledge, January 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.757, + 0.826, + 0.797 + ], + "angle": 0, + "content": "[13] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.809, + 0.826, + 0.86 + ], + "angle": 0, + "content": "[14] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Anwen Hu, Haowei Liu, Qi Qian, Ji Zhang, and Fei Huang. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13040–13051, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.873, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[15] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.18, + 0.113, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.827, + 0.17 + ], + "angle": 0, + "content": "[16] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. arXiv preprint arXiv:2204.14198, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.178, + 0.827, + 0.219 + ], + "angle": 0, + "content": "[17] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pretraining for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.226, + 0.827, + 0.267 + ], + "angle": 0, + "content": "[18] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.274, + 0.827, + 0.325 + ], + "angle": 0, + "content": "[19] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. 
arXiv preprint arXiv:2406.16860, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.335, + 0.827, + 0.374 + ], + "angle": 0, + "content": "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.383, + 0.825, + 0.411 + ], + "angle": 0, + "content": "[21] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.419, + 0.825, + 0.447 + ], + "angle": 0, + "content": "[22] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Khan. Videogpt+: Integrating image and video encoders for enhanced video understanding. arXiv preprint arXiv:2406.09418, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.454, + 0.825, + 0.482 + ], + "angle": 0, + "content": "[23] Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.49, + 0.825, + 0.53 + ], + "angle": 0, + "content": "[24] Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, Xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. Kangaroo: A powerful video-language model supporting long-context video input. arXiv preprint arXiv:2408.15542, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.538, + 0.825, + 0.578 + ], + "angle": 0, + "content": "[25] Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, et al. Longvu: Spatiotemporal adaptive compression for long video-language understanding. arXiv preprint arXiv:2410.17434, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.586, + 0.825, + 0.626 + ], + "angle": 0, + "content": "[26] Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.634, + 0.825, + 0.662 + ], + "angle": 0, + "content": "[27] Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.67, + 0.827, + 0.709 + ], + "angle": 0, + "content": "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.718, + 0.827, + 0.77 + ], + "angle": 0, + "content": "[29] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.778, + 0.825, + 0.818 + ], + "angle": 0, + "content": "[30] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.827, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[31] Rohan Choudhury, Guanglei Zhu, Sihan Liu, Koichiro Niinuma, Kris M Kitani, and László Jeni. 
Don't look twice: Faster video transformers with run-length tokenization. arXiv preprint arXiv:2411.05222, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.874, + 0.466, + 0.89 + ], + "angle": 0, + "content": "[32] OpenAI. Gpt-4v(ision) system card, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.898, + 0.428, + 0.913 + ], + "angle": 0, + "content": "[33] OpenAI. Gpt-4o system card, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.092, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.119 + ], + "angle": 0, + "content": "[34] Gemini Team Google. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.126, + 0.826, + 0.154 + ], + "angle": 0, + "content": "[35] Gemini Team Google. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.161, + 0.61, + 0.175 + ], + "angle": 0, + "content": "[36] Anthropic. The claude 3 model family: Opus, sonnet, haiku. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.183, + 0.825, + 0.234 + ], + "angle": 0, + "content": "[37] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9556-9567, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.242, + 0.825, + 0.282 + ], + "angle": 0, + "content": "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.289, + 0.826, + 0.316 + ], + "angle": 0, + "content": "[39] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.323, + 0.826, + 0.375 + ], + "angle": 0, + "content": "[40] Jeffrey P Bigham, Chandrika Jayant, Hanjie Ji, Greg Little, Andrew Miller, Robert C Miller, Robin Miller, Aubrey Tatarowicz, Brandyn White, Samual White, et al. Vizwiz: nearly real-time answers to visual questions. In Proceedings of the 23nd annual ACM symposium on User interface software and technology, pages 333-342, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.383, + 0.825, + 0.422 + ], + "angle": 0, + "content": "[41] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.43, + 0.825, + 0.468 + ], + "angle": 0, + "content": "[42] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.477, + 0.825, + 0.516 + ], + "angle": 0, + "content": "[43] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.524, + 0.826, + 0.563 + ], + "angle": 0, + "content": "[44] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. In European Conference on Computer Vision, pages 148-166, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.571, + 0.826, + 0.584 + ], + "angle": 0, + "content": "[45] xai. RealworldQA benchmark. https://huggingface.co/datasets/xai-org/RealworldQA, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.592, + 0.826, + 0.63 + ], + "angle": 0, + "content": "[46] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.639, + 0.826, + 0.665 + ], + "angle": 0, + "content": "[47] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.673, + 0.825, + 0.712 + ], + "angle": 0, + "content": "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.72, + 0.826, + 0.771 + ], + "angle": 0, + "content": "[49] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.78, + 0.825, + 0.819 + ], + "angle": 0, + "content": "[50] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8948-8957, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.827, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[51] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.873, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[52] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.947 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[53] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. 
Docvqa: A dataset for vqa on document images. In 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2199-2208, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.14, + 0.825, + 0.169 + ], + "angle": 0, + "content": "[54] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. arXiv preprint arXiv:2407.21038, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.175, + 0.825, + 0.202 + ], + "angle": 0, + "content": "[55] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.209, + 0.826, + 0.248 + ], + "angle": 0, + "content": "[56] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C. V. Jawahar. Infographicvqa. In 2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 2582-2591, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.256, + 0.826, + 0.297 + ], + "angle": 0, + "content": "[57] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.304, + 0.825, + 0.332 + ], + "angle": 0, + "content": "[58] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.339, + 0.826, + 0.379 + ], + "angle": 0, + "content": "[59] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. 
Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.386, + 0.825, + 0.426 + ], + "angle": 0, + "content": "[60] Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. From recognition to cognition: Visual commonsense reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6720-6731, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.433, + 0.826, + 0.497 + ], + "angle": 0, + "content": "[61] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 2507-2521. Curran Associates, Inc., 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.506, + 0.826, + 0.546 + ], + "angle": 0, + "content": "[62] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.554, + 0.825, + 0.593 + ], + "angle": 0, + "content": "[63] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.601, + 0.825, + 0.628 + ], + "angle": 0, + "content": "[64] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. 
Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.636, + 0.826, + 0.675 + ], + "angle": 0, + "content": "[65] Jierun Chen, Fangyun Wei, Jinjing Zhao, Sizhe Song, Bohuai Wu, Zhuoxuan Peng, S-H Gary Chan, and Hongyang Zhang. Revisiting referring expression comprehension evaluation in the era of large multimodal models. arXiv preprint arXiv:2406.16866, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.683, + 0.826, + 0.723 + ], + "angle": 0, + "content": "[66] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.73, + 0.826, + 0.783 + ], + "angle": 0, + "content": "[67] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.79, + 0.825, + 0.818 + ], + "angle": 0, + "content": "[68] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.825, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[69] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.873, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[70] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[71] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.141, + 0.825, + 0.18 + ], + "angle": 0, + "content": "[72] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. Star: A benchmark for situated reasoning in real-world videos. In Thirty-fifth Conference on Neural Information Processing Systems (NeurIPS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.188, + 0.826, + 0.228 + ], + "angle": 0, + "content": "[73] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. Tgif-qa: Toward spatiotemporal reasoning in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2758–2766, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.236, + 0.824, + 0.264 + ], + "angle": 0, + "content": "[74] Jie Lei, Licheng Yu, Mohit Bansal, and Tamara L Berg. Tvqa: Localized, compositional video question answering. arXiv preprint arXiv:1809.01696, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.273, + 0.826, + 0.312 + ], + "angle": 0, + "content": "[75] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal ILms in video analysis. arXiv preprint arXiv:2405.21075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.321, + 0.826, + 0.36 + ], + "angle": 0, + "content": "[76] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9127–9134, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.369, + 0.826, + 0.409 + ], + "angle": 0, + "content": "[77] Munan Ning, Bin Zhu, Yujia Xie, Bin Lin, Jiaxi Cui, Lu Yuan, Dongdong Chen, and Li Yuan. Video-bench: A comprehensive benchmark and toolkit for evaluating video-based large language models. arXiv preprint arXiv:2311.16103, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.418, + 0.824, + 0.445 + ], + "angle": 0, + "content": "[78] Jianrui Zhang, Mu Cai, and Yong Jae Lee. Vinoground: Scrutinizing Imms over dense temporal reasoning with short videos. arXiv preprint arXiv:2410.02763, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.453, + 0.826, + 0.492 + ], + "angle": 0, + "content": "[79] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.501, + 0.826, + 0.528 + ], + "angle": 0, + "content": "[80] Daniel Cores, Michael Dorkenwald, Manuel Mucientes, Cees GM Snoek, and Yuki M Asano. Tvbench: Redesigning video-language evaluation. arXiv preprint arXiv:2410.07752, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.537, + 0.826, + 0.575 + ], + "angle": 0, + "content": "[81] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.585, + 0.826, + 0.625 + ], + "angle": 0, + "content": "[82] David Chen and William B Dolan. Collecting highly parallel data for paraphrase evaluation. In Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies, pages 190-200, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.633, + 0.826, + 0.661 + ], + "angle": 0, + "content": "[83] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI conference on artificial intelligence, volume 32, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.669, + 0.826, + 0.709 + ], + "angle": 0, + "content": "[84] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4581-4591, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.718, + 0.826, + 0.745 + ], + "angle": 0, + "content": "[85] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. 
In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.753, + 0.826, + 0.78 + ], + "angle": 0, + "content": "[86] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.788, + 0.826, + 0.828 + ], + "angle": 0, + "content": "[87] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D Manning. Auroracap: Efficient, performant video detailed captioning and a new benchmark. arXiv preprint arXiv:2410.03051, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.837, + 0.826, + 0.876 + ], + "angle": 0, + "content": "[88] Yuxuan Wang, Yueqian Wang, Dongyan Zhao, Cihang Xie, and Zilong Zheng. Videohallucer: Evaluating intrinsic and extrinsic hallucinations in large video-language models. arXiv preprint arXiv:2406.16338, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.885, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[89] Jiacheng Zhang, Yang Jiao, Shaoxiang Chen, Jingjing Chen, and Yu-Gang Jiang. Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.947 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[90] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.141, + 0.825, + 0.18 + ], + "angle": 0, + "content": "[91] Ruchit Rawal, Khalid Saifullah, Miquel Farré, Ronen Basri, David Jacobs, Gowthami Somepalli, and Tom Goldstein. Cinepile: A long video question answering dataset and benchmark. arXiv preprint arXiv:2405.08813, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.189, + 0.826, + 0.23 + ], + "angle": 0, + "content": "[92] Weihan Wang, Zehai He, Wenyi Hong, Yean Cheng, Xiaohan Zhang, Ji Qi, Xiaotao Gu, Shiyu Huang, Bin Xu, Yuxiao Dong, et al. Lvbench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.239, + 0.826, + 0.279 + ], + "angle": 0, + "content": "[93] Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Movieqa: Understanding stories in movies through question-answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4631–4640, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.288, + 0.829, + 0.327 + ], + "angle": 0, + "content": "[94] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37:28828-28857, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.336, + 0.826, + 0.389 + ], + "angle": 0, + "content": "[95] Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18221-18232, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.397, + 0.826, + 0.438 + ], + "angle": 0, + "content": "[96] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.446, + 0.826, + 0.486 + ], + "angle": 0, + "content": "[97] Guo Chen, Yicheng Liu, Yifei Huang, Yuping He, Baoqi Pei, Jilan Xu, Yali Wang, Tong Lu, and Limin Wang. Cg-bench: Clue-grounded question answering benchmark for long video understanding. arXiv preprint arXiv:2412.12075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.495, + 0.826, + 0.535 + ], + "angle": 0, + "content": "[98] Orr Zohar, Xiaohan Wang, Yann Dubois, Nikhil Mehta, Tong Xiao, Philippe Hansen-Estruch, Licheng Yu, Xiaofang Wang, Felix Juefei-Xu, Ning Zhang, et al. Apollo: An exploration of video understanding in large multimodal models. arXiv preprint arXiv:2412.10360, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.544, + 0.826, + 0.583 + ], + "angle": 0, + "content": "[99] Mu Cai, Reuben Tan, Jianrui Zhang, Bocheng Zou, Kai Zhang, Feng Yao, Fangrui Zhu, Jing Gu, Yiwu Zhong, Yuzhang Shang, et al. Temporalbench: Benchmarking fine-grained temporal understanding for multimodal video models. arXiv preprint arXiv:2410.10818, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.592, + 0.826, + 0.633 + ], + "angle": 0, + "content": "[100] Ziyao Shangguan, Chuhan Li, Yuxuan Ding, Yanan Zheng, Yilun Zhao, Tesca Fitzgerald, and Arman Cohan. Tomato: Assessing visual temporal reasoning capabilities in multimodal foundation models. arXiv preprint arXiv:2410.23266, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.826, + 0.682 + ], + "angle": 0, + "content": "[101] Wenyi Hong, Yean Cheng, Zhuoyi Yang, Weihan Wang, Lefan Wang, Xiaotao Gu, Shiyu Huang, Yuxiao Dong, and Jie Tang. Motionbench: Benchmarking and improving fine-grained video motion understanding for vision language models. arXiv preprint arXiv:2501.02955, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.69, + 0.826, + 0.729 + ], + "angle": 0, + "content": "[102] Yuanxin Liu, Shicheng Li, Yi Liu, Yuxiang Wang, Shuhuai Ren, Lei Li, Sishuo Chen, Xu Sun, and Lu Hou. Tempcompass: Do video llms really understand videos? arXiv preprint arXiv:2403.00476, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.826, + 0.779 + ], + "angle": 0, + "content": "[103] Mohammadreza Salehi, Jae Sung Park, Tanush Yadav, Aditya Kusupati, Ranjay Krishna, Yejin Choi, Hannaneh Hajishirzi, and Ali Farhadi. Actionatlas: A videoqa benchmark for domain-specialized action recognition. arXiv preprint arXiv:2410.05774, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.788, + 0.826, + 0.828 + ], + "angle": 0, + "content": "[104] Daniel Bolya, Po-Yao Huang, Peize Sun, Jang Hyun Cho, Andrea Madotto, Chen Wei, Tengyu Ma, Jiale Zhi, Jathushan Rajasegaran, Hanoona Rasheed, et al. Perception encoder: The best visual embeddings are not at the output of the network. arXiv preprint arXiv:2504.13181, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.836, + 0.826, + 0.876 + ], + "angle": 0, + "content": "[105] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.884, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[106] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.145 + ], + "angle": 0, + "content": "[107] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Intervl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.155, + 0.434, + 0.17 + ], + "angle": 0, + "content": "[108] Brandon Castellano. PySceneDetect." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.181, + 0.826, + 0.233 + ], + "angle": 0, + "content": "[109] Ahmed Masry, Do Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Findings of the Association for Computational Linguistics: ACL 2022, pages 2263-2279, Dublin, Ireland, May 2022. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.244, + 0.826, + 0.283 + ], + "angle": 0, + "content": "[110] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.294, + 0.826, + 0.321 + ], + "angle": 0, + "content": "[111] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.332, + 0.826, + 0.371 + ], + "angle": 0, + "content": "[112] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.382, + 0.826, + 0.421 + ], + "angle": 0, + "content": "[113] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.433, + 0.826, + 0.472 + ], + "angle": 0, + "content": "[114] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.483, + 0.826, + 0.673 + ], + "angle": 0, + "content": "[115] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonio Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, Miguel Martin, Tushar Nagarajan, Ilija Radosavovic, Santhosh Kumar Ramakrishnan, Fiona Ryan, Jayant Sharma, Michael Wray, Mengmeng Xu, Eric Zhongcong Xu, Chen Zhao, Siddhant Bansal, Dhruv Batra, Vincent Cartillier, Sean Crane, Tien Do, Morrie Doulaty, Akshay Erapalli, Christoph Feichtenhofer, Adriano Fragomeni, Qichen Fu, Abraham Gebreselasie, Cristina Gonzalez, James Hillis, Xuhua Huang, Yifei Huang, Wenqi Jia, Weslie Khoo, Jachym Kolar, Satwik Kottur, Anurag Kumar, Federico Landini, Chao Li, Yanghao Li, Zhenqiang Li, Karttikeya Mangalam, Raghava Modhugu, Jonathan Munro, Tullie Murrell, Takumi Nishiyasu, Will Price, Paola Ruiz Puentes, Merey Ramazanova, Leda Sari, Kiran Somasundaram, Audrey Southerland, Yusuke Sugano, Ruijie Tao, Minh Vo, Yuchen Wang, Xindi Wu, Takuma Yagi, Ziwei Zhao, Yunyi Zhu, Pablo Arbelaez, David Crandall, Dima Damen, Giovanni Maria Farinella, Christian Fuegen, Bernard Ghanem, Vamsi Krishna Ithapu, C. V. Jawahar, Hanbyul Joo, Kris Kitani, Haizhou Li, Richard Newcombe, Aude Oliva, Hyun Soo Park, James M. Rehg, Yoichi Sato, Jianbo Shi, Mike Zheng Shou, Antonio Torralba, Lorenzo Torresani, Mingfei Yan, and Jitendra Malik. Ego4d: Around the world in 3,000 hours of egocentric video. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.684, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[116] Kristen Grauman, Andrew Westbury, Lorenzo Torresani, Kris Kitani, Jitendra Malik, Triantafyllos Afouras, Kumar Ashutosh, Vijay Baiyya, Siddhant Bansal, Bikram Boote, Eugene Byrne, Zachary Chavis, Joya Chen, Feng Cheng, Fu-Jen Chu, Sean Crane, Avijit Dasgupta, Jing Dong, María Escobar, Cristhian Forigua, Abraham Kahsay Gebreselasie, Sanjay Haresh, Jing Huang, Md Mohaiminul Islam, Suyog Dutt Jain, Rawal Khirodkar, Devansh Kukreja, Kevin J Liang, Jia-Wei Liu, Sagnik Majumder, Yongsen Mao, Miguel Martin, Effrosyni Mavroudi, Tushar Nagarajan, Francesco Ragusa, Santhosh K. Ramakrishnan, Luigi Seminara, Arjun Somayazulu, Yale Song, Shan Su, Zihui Xue, Edward Zhang, Jinxu Zhang, Angela Castillo, Changan Chen, Xinzhu Fu, Ryosuke Furuta, Cristina Gonzalez, Prince Gupta, Jiabo Hu, Yifei Huang, Yiming Huang, Weslie Khoo, Anush Kumar, Robert Kuo, Sach Lakhavani, Miao Liu, Mingjing Luo, Zhengyi Luo, Brighid Meredith, Austin Miller, Oluwatuminu Oguntola, Xiaqing Pan, Penny Peng, Shraman Pramanick, Merey Ramazanova, Fiona Ryan, Wei Shan, Kiran Somasundaram, Chenan Song, Audrey Southerland, Masatoshi Tateno, Huiyu Wang, Yuchen Wang, Takuma Yagi, Mingfei Yan, Xitong Yang, Zecheng Yu, Shengxin Cindy Zha, Chen Zhao, Ziwei Zhao, Zhifan Zhu, Jeff Zhuo, Pablo Arbeláez, Gedas Bertasius, David J. Crandall, Dima Damen, Jakob Julian Engel, Giovanni Maria Farinella, Antonino Furnari, Bernard Ghanem, Judy Hoffman, C. V. Jawahar, Richard A. Newcombe, Hyun Soo Park, James M. Rehg, Yoichi Sato, Manolis Savva, Jianbo Shi, Mike Zheng Shou, and Michael Wray. Ego-exo4d: Understanding skilled human activity from first- and third-person perspectives. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19383-19400, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[117] Yansong Tang, Dajun Wang, Zhenyu Xu, Jingjing Liu, Xiaoyong Wang, Xing Gao, Jinhui Tang, and Dong Wu. Coin: A large-scale dataset for comprehensive instructional video analysis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.139, + 0.827, + 0.18 + ], + "angle": 0, + "content": "[118] Dimitri Zhukov, Jean-Baptiste Alayrac, Chen Sun, Ivan Laptev, Cordelia Schmid, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.187, + 0.827, + 0.227 + ], + "angle": 0, + "content": "[119] Thong Thanh Nguyen, Zhiyuan Hu, Xiaobao Wu, Cong-Duy T Nguyen, See-Kiong Ng, and Anh Tuan Luu. Encoding and controlling global semantics for long-form video question answering. arXiv preprint arXiv:2405.19723, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.235, + 0.827, + 0.274 + ], + "angle": 0, + "content": "[120] Kexin Yi, Chuang Gan, Yunzhu Li, Pushmeet Kohli, Jiajun Wu, Antonio Torralba, and Joshua B Tenenbaum. Clevrer: Collision events for video representation and reasoning. arXiv preprint arXiv:1910.01442, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.282, + 0.827, + 0.322 + ], + "angle": 0, + "content": "[121] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.33, + 0.827, + 0.382 + ], + "angle": 0, + "content": "[122] Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, et al. The\" something something\" video database for learning and evaluating visual common sense. In Proceedings of the IEEE international conference on computer vision, pages 5842-5850, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.39, + 0.825, + 0.43 + ], + "angle": 0, + "content": "[123] Paul Voigtlaender, Soravit Changpinyo, Jordi Pont-Tuset, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with video localized narratives. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2461-2471, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.438, + 0.825, + 0.478 + ], + "angle": 0, + "content": "[124] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.485, + 0.827, + 0.525 + ], + "angle": 0, + "content": "[125] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.533, + 0.827, + 0.585 + ], + "angle": 0, + "content": "[126] Soichiro Fujita, Tsutomu Hirao, Hidetakam Kamigaito, Manabu Okumura, and Masaaki Nagata. Soda: Story oriented dense video captioning evaluation framework. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VI 16, pages 517-531. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.825, + 0.621 + ], + "angle": 0, + "content": "[127] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 11:635-651, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.628, + 0.825, + 0.655 + ], + "angle": 0, + "content": "[128] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.663, + 0.825, + 0.703 + ], + "angle": 0, + "content": "[129] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.711, + 0.825, + 0.737 + ], + "angle": 0, + "content": "[130] Tri Dao. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.746, + 0.825, + 0.773 + ], + "angle": 0, + "content": "[131] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Alan Lerer. Automatic differentiation in pytorch, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.781, + 0.655, + 0.795 + ], + "angle": 0, + "content": "[132] Montalvo Pablo and Wightman Ross. PDF association dataset (pdfa), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.803, + 0.67, + 0.818 + ], + "angle": 0, + "content": "[133] Montalvo Pablo and Wightman Ross. Industry documents library (idl), 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.825, + 0.825, + 0.864 + ], + "angle": 0, + "content": "[134] Lei Li, Yuqi Wang, Runxin Xu, Peiyi Wang, Xiachong Feng, Lingpeng Kong, and Qi Liu. Multimodal arxiv: A dataset for improving scientific comprehension of large vision-language models. arXiv preprint arXiv:2403.00231, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[135] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.144 + ], + "angle": 0, + "content": "[136] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, Tom Duerig, and Vittorio Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. IJCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.153, + 0.826, + 0.192 + ], + "angle": 0, + "content": "[137] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in neural information processing systems, 34:23634-23651, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.201, + 0.826, + 0.252 + ], + "angle": 0, + "content": "[138] Mathew Monfort, SouYoung Jin, Alexander Liu, David Harwath, Rogerio Feris, James Glass, and Aude Oliva. 
Spoken moments: Learning joint audio-visual representations from video descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14871–14881, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.261, + 0.826, + 0.313 + ], + "angle": 0, + "content": "[139] Gunnar A Sigurdsson, Gúl Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 510-526. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.322, + 0.826, + 0.361 + ], + "angle": 0, + "content": "[140] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pages 5803-5812, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.37, + 0.826, + 0.41 + ], + "angle": 0, + "content": "[141] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with \\(2.8\\mathrm{m}\\) challenging questions. arXiv preprint arXiv:2502.13124, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.418, + 0.826, + 0.445 + ], + "angle": 0, + "content": "[142] Kushal Kafle, Scott Cohen, Brian Price, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In CVPR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.453, + 0.826, + 0.492 + ], + "angle": 0, + "content": "[143] Nitesh Methani, Pritha Ganguly, Mitesh M. Khapra, and Pratyush Kumar. Plotqa: Reasoning over scientific plots. In The IEEE Winter Conference on Applications of Computer Vision (WACV), March 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.501, + 0.826, + 0.528 + ], + "angle": 0, + "content": "[144] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.537, + 0.826, + 0.577 + ], + "angle": 0, + "content": "[145] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 947-952, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.584, + 0.826, + 0.612 + ], + "angle": 0, + "content": "[146] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.62, + 0.826, + 0.647 + ], + "angle": 0, + "content": "[147] Samira Ebrahimi Kahou, Vincent Michalski, Adam Atkinson, Akos Kadar, Adam Trischler, and Yoshua Bengio. Figureqa: An annotated figure dataset for visual reasoning, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.655, + 0.826, + 0.708 + ], + "angle": 0, + "content": "[148] Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. The hateful memes challenge: Detecting hate speech in multimodal memes. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 2611-2624. Curran Associates, Inc., 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.716, + 0.826, + 0.743 + ], + "angle": 0, + "content": "[149] Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.752, + 0.826, + 0.803 + ], + "angle": 0, + "content": "[150] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. In The 35th Conference on Neural Information Processing Systems (NeurIPS) Track on Datasets and Benchmarks, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.812, + 0.826, + 0.839 + ], + "angle": 0, + "content": "[151] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.848, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[152] Yilun Zhao, Chen Zhao, Linyong Nan, Zhenting Qi, Wenlin Zhang, Xiangru Tang, Boyu Mi, and Dragomir Radev. Robut: A systematic study of table qa robustness against human-annotated adversarial perturbations. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6064–6081, Toronto, Canada, July 2023. Association for Computational Linguistics." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "[153] Hugo Laurençon, Léo Tronchon, and Victor Sanh. Unlocking the conversion of web screenshots into html code with the websight dataset, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.825, + 0.155 + ], + "angle": 0, + "content": "[154] Yuke Zhu, Oliver Groth, Michael Bernstein, and Li Fei-Fei. 
Visual7w: Grounded question answering in images. In IEEE Conference on Computer Vision and Pattern Recognition, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.163, + 0.826, + 0.19 + ], + "angle": 0, + "content": "[155] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In AAAI, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.199, + 0.825, + 0.226 + ], + "angle": 0, + "content": "[156] Jonas Belouadi, Anne Lauscher, and Steffen Eger. Automatikz: Text-guided synthesis of scientific vector graphics with tikz, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.235, + 0.827, + 0.274 + ], + "angle": 0, + "content": "[157] Mengye Ren, Ryan Kiros, and Richard Zemel. Exploring models and data for image question answering. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.282, + 0.827, + 0.334 + ], + "angle": 0, + "content": "[158] Jason Obeid and Enamul Hoque. Chart-to-text: Generating natural language descriptions for charts by adapting the transformer model. In Brian Davis, Yvette Graham, John Kelleher, and Yaji Sripada, editors, Proceedings of the 13th International Conference on Natural Language Generation, pages 138-147, Dublin, Ireland, December 2020. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.343, + 0.825, + 0.371 + ], + "angle": 0, + "content": "[159] Benny J. Tang, Angie Boggust, and Arvind Satyanarayan. Vistext: A benchmark for semantically rich chart captioning. In The Annual Meeting of the Association for Computational Linguistics (ACL), 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.379, + 0.826, + 0.456 + ], + "angle": 0, + "content": "[160] Zhiyu Chen, Wenhu Chen, Charese Smiley, Sameena Shah, Iana Borova, Dylan Langdon, Reema Moussa, Matt Beane, Ting-Hao Huang, Bryan Routledge, and William Yang Wang. Finqa: A dataset of numerical reasoning over financial data. In Marie-Francine Moens, Xuanjing Huang, Lucia Specia, and Scott Wen-tau Yih, editors, Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3697-3711, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.465, + 0.827, + 0.505 + ], + "angle": 0, + "content": "[161] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marcal Rusinol, C.V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4290-4300, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.513, + 0.827, + 0.579 + ], + "angle": 0, + "content": "[162] Fengbin Zhu, Wenqiang Lei, Youcheng Huang, Chao Wang, Shuo Zhang, Jiancheng Lv, Fuli Feng, and Tat-Seng Chua. Tat-qa: A question answering benchmark on a hybrid of tabular and textual content in finance. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3277–3287, Online, August 2021. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.586, + 0.428, + 0.601 + ], + "angle": 0, + "content": "[163] Chris Wendler. Renderedtext, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.609, + 0.825, + 0.648 + ], + "angle": 0, + "content": "[164] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. 
Raven: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.657, + 0.825, + 0.684 + ], + "angle": 0, + "content": "[165] Urs-Viktor Marti and H. Bunke. Theiam-database:An english sentence database for offline handwriting recognition.International Journal on Document Analysis and Recognition,5:39-46,11 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.692, + 0.825, + 0.732 + ], + "angle": 0, + "content": "[166] Pan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. Dynamic prompt learning via policy gradient for semi-structured mathematical reasoning. In International Conference on Learning Representations (ICLR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.741, + 0.825, + 0.768 + ], + "angle": 0, + "content": "[167] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.827, + 0.829 + ], + "angle": 0, + "content": "[168] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, UIST '21, page 498-510, New York, NY, USA, 2021. Association for Computing Machinery." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.837, + 0.671, + 0.851 + ], + "angle": 0, + "content": "[169] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.86, + 0.825, + 0.911 + ], + "angle": 0, + "content": "[170] Aniruddha Kembhavi, Minjoon Seo, Dustin Schwenk, Jonghyun Choi, Ali Farhadi, and Hannaneh Hajishirzi. Are you smarter than a sixth grader? 
textbook question answering for multimodal machine comprehension. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5376-5384, 2017." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "[171] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. Visualmrc: Machine reading comprehension on document images. In AAAI, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.826, + 0.157 + ], + "angle": 0, + "content": "[172] Jason Lau, Soumya Gayen, Asma Ben Abacha, and Dina Demner-Fushman. A dataset of clinically generated visual questions and answers about radiology images. Scientific Data, 5:180251, 11 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.163, + 0.826, + 0.229 + ], + "angle": 0, + "content": "[173] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, Jian-Guang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio, editors, Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1094-1110, Dublin, Ireland, May 2022. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.236, + 0.826, + 0.289 + ], + "angle": 0, + "content": "[174] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. 
In The Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.298, + 0.434, + 0.312 + ], + "angle": 0, + "content": "[175] Diagram image to text dataset, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.32, + 0.825, + 0.348 + ], + "angle": 0, + "content": "[176] Bo Li, Yuanhan Zhang, Liangyu Chen, Jinghao Wang, Fanyi Pu, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Mimic-it: Multi-modal in-context instruction tuning, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.356, + 0.826, + 0.408 + ], + "angle": 0, + "content": "[177] Yilun Zhao, Yunxiang Li, Chenying Li, and Rui Zhang. Multihiertt: Numerical reasoning over multi hierarchical tabular and textual data. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6588-6600, Dublin, Ireland, May 2022. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.416, + 0.826, + 0.469 + ], + "angle": 0, + "content": "[178] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Anna Korhonen, David Traum, and Lluis Márquez, editors, Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6418-6428, Florence, Italy, July 2019. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.477, + 0.826, + 0.53 + ], + "angle": 0, + "content": "[179] Harsh Jhamtani et al. Learning to describe differences between pairs of similar images. 
In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4024-4034, Brussels, Belgium, October-November 2018. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.538, + 0.826, + 0.578 + ], + "angle": 0, + "content": "[180] Haoping Bai, Shancong Mou, Tatiana Likhomanenko, Ramazan Gokberk Cinbis, Oncel Tuzel, Ping Huang, Jiulong Shan, Jianjun Shi, and Meng Cao. Vision datasets: A benchmark for vision-based industrial inspection. arXiv preprint arXiv:2306.07890, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.586, + 0.826, + 0.627 + ], + "angle": 0, + "content": "[181] Tanmay Gupta, Dustin Schwenk, Ali Farhadi, Derek Hoiem, and Aniruddha Kembhavi. Imagine this! scripts to compositions to videos. In Proceedings of the European conference on computer vision (ECCV), pages 598-613, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.634, + 0.824, + 0.662 + ], + "angle": 0, + "content": "[182] Benno Krojer, Vaibhav Adlakha, Vibhav Vineet, Yash Goyal, Edoardo Ponti, and Siva Reddy. Image retrieval from contextual descriptions. arXiv preprint arXiv:2203.15867, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.67, + 0.826, + 0.71 + ], + "angle": 0, + "content": "[183] Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1383-1391, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.718, + 0.826, + 0.758 + ], + "angle": 0, + "content": "[184] Yingshan Chang, Mridu Narang, Hisami Suzuki, Guihong Cao, Jianfeng Gao, and Yonatan Bisk. Webqa: Multihop and multimodal qa. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16495-16504, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.766, + 0.826, + 0.794 + ], + "angle": 0, + "content": "[185] Maxwell Forbes, Christine Kaeser-Chen, Piyush Sharma, and Serge Belongie. Neural naturalist: Generating fine-grained image comparisons. arXiv preprint arXiv:1909.04101, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.801, + 0.826, + 0.842 + ], + "angle": 0, + "content": "[186] Hareesh Ravi, Kushal Kafle, Scott Cohen, Jonathan Brandt, and Mubbasir Kapadia. Aesop: Abstract encoding of stories, objects, and pictures. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2052-2063, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.849, + 0.826, + 0.878 + ], + "angle": 0, + "content": "[187] Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Nazli Ikizler-Cinbis. Recipeqa: A challenge dataset for multimodal comprehension of cooking recipes. arXiv preprint arXiv:1809.00812, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.885, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[188] Dong Huk Park, Trevor Darrell, and Anna Rohrbach. Robust change captioning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4624-4633, 2019." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[189] Rumeysa Bodur, Erhan Gundogdu, Binod Bhattarai, Tae-Kyun Kim, Michael Donoser, and Loris Bazzani. iedit: Localised text-guided image editing with weak supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7426-7435, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.139, + 0.826, + 0.205 + ], + "angle": 0, + "content": "[190] Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. In Chengqing Zong and Michael Strube, editors, Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1470–1480, Beijing, China, July 2015. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.212, + 0.825, + 0.24 + ], + "angle": 0, + "content": "[191] Ye Yuan, Xiao Liu, Wondimu Dikubab, Hui Liu, Zhilong Ji, Zhongqin Wu, and Xiang Bai. Syntax-aware network for handwritten mathematical expression recognition. arXiv preprint arXiv:2203.01601, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.825, + 0.286 + ], + "angle": 0, + "content": "[192] Yasumasa Onoe, Sunayana Rane, Zachary Berger, Yonatan Bitton, Jaemin Cho, Roopal Garg, Alexander Ku, Zarana Parekh, Jordi Pont-Tuset, Garrett Tanzer, et al. Docci: Descriptions of connected and contrasting images. In European Conference on Computer Vision, pages 291-309. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.293, + 0.827, + 0.346 + ], + "angle": 0, + "content": "[193] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating clip-style models on dense captions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26700-26709, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.353, + 0.827, + 0.393 + ], + "angle": 0, + "content": "[194] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen-tau Yih, et al. Altogether: Image captioning via re-aligning alt-text. 
arXiv preprint arXiv:2410.17251, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.4, + 0.825, + 0.44 + ], + "angle": 0, + "content": "[195] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.447, + 0.827, + 0.499 + ], + "angle": 0, + "content": "[196] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE international conference on computer vision, pages 2641–2649, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.508, + 0.825, + 0.548 + ], + "angle": 0, + "content": "[197] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 787-798, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.554, + 0.825, + 0.595 + ], + "angle": 0, + "content": "[198] Xiyao Wang, Yuhang Zhou, Xiaoyu Liu, Hongjin Lu, Yuancheng Xu, Feihong He, Jaehong Yoon, Taixi Lu, Gedas Bertasius, Mohit Bansal, Huaxiu Yao, and Furong Huang. Mementos: A comprehensive benchmark for multimodal large language model reasoning over image sequences, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.602, + 0.827, + 0.642 + ], + "angle": 0, + "content": "[199] Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23056-23065, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.649, + 0.827, + 0.689 + ], + "angle": 0, + "content": "[200] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.696, + 0.825, + 0.724 + ], + "angle": 0, + "content": "[201] Nazneen Rajani, Lewis Tunstall, Edward Beeching, Nathan Lambert, Alexander M. Rush, and Thomas Wolf. No robots. https://huggingface.co/datasets/HuggingFaceH4/no Robots, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.731, + 0.827, + 0.77 + ], + "angle": 0, + "content": "[202] Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel-Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. Mathqa: Towards interpretable math word problem solving with operation-based formalisms. arXiv preprint arXiv:1905.13319, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.778, + 0.827, + 0.817 + ], + "angle": 0, + "content": "[203] Chunting Zhou, Pengfei Liu, Puxin Xu, Srinivasan Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. Lima: Less is more for alignment. Advances in Neural Information Processing Systems, 36:55006-55021, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.825, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[204] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[205] Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. 
Finetuned language models are zero-shot learners. In International Conference on Learning Representations." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[206] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.825, + 0.182 + ], + "angle": 0, + "content": "[207] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.192, + 0.825, + 0.231 + ], + "angle": 0, + "content": "[208] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.241, + 0.825, + 0.28 + ], + "angle": 0, + "content": "[209] Kaichen Zhang, Bo Li, Peiyuan Zhang, Fanyi Pu, Joshua Adrian Cahyono, Kairui Hu, Shuai Liu, Yuhan Zhang, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Reality check on the evaluation of large multimodal models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.29, + 0.825, + 0.341 + ], + "angle": 0, + "content": "[210] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. 
Vlmevalkit: An open-source toolkit for evaluating large multimodality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.353, + 0.825, + 0.392 + ], + "angle": 0, + "content": "[211] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.402, + 0.824, + 0.429 + ], + "angle": 0, + "content": "[212] Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Zehuan Yuan, Ping Luo, and Huchuan Lu. Universal instance perception as object discovery and retrieval. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.439, + 0.824, + 0.479 + ], + "angle": 0, + "content": "[213] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.488, + 0.825, + 0.528 + ], + "angle": 0, + "content": "[214] Jang Hyun Cho, Boris Ivanovic, Yulong Cao, Edward Schmerling, Yue Wang, Xinshuo Weng, Boyi Li, Yurong You, Philipp Kraehenbuehl, Yan Wang, and Marco Pavone. Language-image models with 3d understanding. In The Thirteenth International Conference on Learning Representations, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.538, + 0.826, + 0.59 + ], + "angle": 0, + "content": "[215] Yale Song, Eugene Byrne, Tushar Nagarajan, Huiyu Wang, Miguel Martin, and Lorenzo Torresani. Ego4d goal-step: Toward hierarchical understanding of procedural activities. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. 
Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 38863-38886. Curran Associates, Inc., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.601, + 0.826, + 0.64 + ], + "angle": 0, + "content": "[216] Triantafyllos Afouras, Effrosyni Mavroudi, Tushar Nagarajan, Huiyu Wang, and Lorenzo Torresani. HT-step: Aligning instructional articles with how-to videos. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.65, + 0.825, + 0.689 + ], + "angle": 0, + "content": "[217] Effrosyni Mavroudi, Triantafyllos Afouras, and Lorenzo Torresani. Learning to ground instructional articles in videos through narrations. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 15201-15213, October 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.699, + 0.825, + 0.739 + ], + "angle": 0, + "content": "[218] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.749, + 0.825, + 0.789 + ], + "angle": 0, + "content": "[219] Hyolim Kang, Jinwoo Kim, Taehyun Kim, and Seon Joo Kim. Uboco: Unsupervised boundary contrastive learning for generic event boundary detection. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20041-20050, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.799, + 0.825, + 0.838 + ], + "angle": 0, + "content": "[220] Zexing Du, Xue Wang, Guoqing Zhou, and Qing Wang. Fast and unsupervised action boundary detection for action segmentation. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3313-3322, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.848, + 0.825, + 0.875 + ], + "angle": 0, + "content": "[221] PySceneDetect: Video Cut Detection and Analysis Tool, https://github.com/breakthrough/pyscenedetect." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.885, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[222] J. S. Chung and A. Zisserman. Out of time: automated lip sync in the wild. In Workshop on Multi-view Lip-reading, ACCV, 2016." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[223] Zi-Yi Dou, Xitong Yang, Tushar Nagarajan, Huiyu Wang, Jing Huang, Nanyun Peng, Kris Kitani, and Fu-Jen Chu. Unlocking exocentric video-language data for egocentric video representation learning. ArXiv, abs/2408.03567, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.825, + 0.167 + ], + "angle": 0, + "content": "[224] Dandan Shan, Jiaqi Geng, Michelle Shu, and David Fouhey. Understanding human hands in contact at internet scale. In CVPR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.175, + 0.825, + 0.203 + ], + "angle": 0, + "content": "[225] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In Proceedings of the 37th International Conference on Neural Information Processing Systems, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.211, + 0.825, + 0.262 + ], + "angle": 0, + "content": "[226] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. 
In International Conference on Machine Learning, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.272, + 0.825, + 0.311 + ], + "angle": 0, + "content": "[227] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. Perceiver io: A general architecture for structured inputs & outputs. ICLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.319, + 0.825, + 0.358 + ], + "angle": 0, + "content": "[228] F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung. A benchmark dataset and evaluation methodology for video object segmentation. In Computer Vision and Pattern Recognition, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.367, + 0.826, + 0.405 + ], + "angle": 0, + "content": "[229] Sergi Caelles, Jordi Pont-Tuset, Federico Perazzi, Alberto Montes, Kevis-Kokitsi Maninis, and Luc Van Gool. The 2019 davis challenge on vos: Unsupervised multi-object segmentation. arXiv:1905.00737, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.415, + 0.825, + 0.454 + ], + "angle": 0, + "content": "[230] Yan Yan, Chenliang Xu, Dawen Cai, and Jason J Corso. Weakly supervised actor-action segmentation via robust multi-task ranking. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1298-1307, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.462, + 0.825, + 0.49 + ], + "angle": 0, + "content": "[231] Ujjal Kr Dutta, Mehrtash Harandi, and Chellu Chandra Sekhar. Unsupervised deep metric learning via orthogonality based probabilistic loss. IEEE Transactions on Artificial Intelligence, 1(1):74-84, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.498, + 0.826, + 0.537 + ], + "angle": 0, + "content": "[232] Luowei Zhou, Yannis Kalantidis, Xinlei Chen, Jason J Corso, and Marcus Rohrbach. Grounded video description. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6578-6587, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.545, + 0.826, + 0.585 + ], + "angle": 0, + "content": "[233] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.826, + 0.633 + ], + "angle": 0, + "content": "[234] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.641, + 0.826, + 0.681 + ], + "angle": 0, + "content": "[235] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2694-2703, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.688, + 0.826, + 0.74 + ], + "angle": 0, + "content": "[236] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16375-16387, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "53" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_origin.pdf b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4699f523994c049dc7f0e5bf830a803201ff1979 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/a31023a5-b71d-43ae-b02b-169e742d817c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba0920ff4a188950b2b33c9d1f706f6917800d61e693ee83b443b7f3cf3df6ce +size 15038983 diff --git a/data/2025/2504_13xxx/2504.13180/full.md b/data/2025/2504_13xxx/2504.13180/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3f91f076beb33572de66168052c4a9160ae5c4e1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/full.md @@ -0,0 +1,1310 @@ +# PerceptionLM: Open-Access Data and Models for Detailed Visual Understanding + +Jang Hyun Cho $^{1,2,\ast,\dagger}$ , Andrea Madotto $^{1,\ast}$ , Effrosyni Mavroudi $^{1,\ast}$ , Triantafyllos Afouras $^{1,\ast}$ , Tushar Nagarajan $^{1,\ast}$ , Muhammad Maaz $^{3,\ast,\dagger}$ , Yale Song $^{1,\ast}$ , Tengyu Ma $^{1,\ast}$ , Shuming Hu $^{1,\ast}$ , Suyog Jain $^{1}$ , Miguel Martin $^{1}$ , Huiyu Wang $^{1}$ , Hanoona Rasheed $^{3,\dagger}$ , Peize Sun $^{1}$ , Po-Yao Huang $^{1}$ , Daniel Bolya $^{1}$ , Nikhila Ravi $^{1}$ , Shashank Jain $^{4}$ , Tammy Stark $^{4}$ , Shane Moon $^{4}$ , Babak Damavandi $^{4}$ , Vivian Lee $^{1}$ , Andrew Westbury $^{1}$ , Salman Khan $^{3}$ , Philipp Krähenbuhl $^{2}$ , Piotr Dólar $^{1}$ , Lorenzo Torresani $^{1,\star}$ , Kristen Grauman $^{1,2,\star}$ , Christoph Feichtenhofer $^{1,\star}$ + +$^{1}$ Meta FAIR $^{2}$ UT Austin $^{3}$ MBZUAI $^{4}$ Meta 
Reality Labs + +*Joint first author †Work done during internships at Meta *Project lead + +# Abstract + +Vision-language models are integral to computer vision research, yet many high-performing models remain closed-source, obscuring their data, design and training recipe. The research community has responded by using distillation from black-box models to label training data, achieving strong benchmark results, at the cost of measurable scientific progress. However, without knowing the details of the teacher model and its data sources, scientific progress remains difficult to measure. In this paper, we study building a Perception Language Model (PLM) in a fully open and reproducible framework for transparent research in image and video understanding. We analyze standard training pipelines without distillation from proprietary models and explore large-scale synthetic data to identify critical data gaps, particularly in detailed video understanding. To bridge these gaps, we release 2.8M human-labeled instances of fine-grained video question-answer pairs and spatio-temporally grounded video captions. Additionally, we introduce PLM-VideoBench, a suite for evaluating challenging video understanding tasks focusing on the ability to reason about "what", "where", "when", and "how" of a video. We make our work fully reproducible by providing data, training recipes, code & models. + +GitHub: https://github.com/facebookresearch/perception_models + +# 1 Introduction + +Vision-language models (VLMs) are now a key part of computer vision research and are widely used in both academia and industry. Many of the strongest performing VLMs are closed-source, meaning their design, training methods, and the data they use are not publicly shared. 
To stay competitive, the research community has started to catch up to the proprietary models by using a straightforward approach — distillation from black-box models [1, 2, 3, 4, 5], where proprietary models are directly used to label training data [3, 6, 7], directly leading to strong benchmark results. + +Although distillation will unlock strong performance, there are two main issues for basic research. First, it makes it hard to track scientific progress. Specifically, we cannot tell if better results on benchmarks are due to advances in model design or training, or simply because the proprietary teacher models were trained on the evaluation sets of widely used benchmarks or internal data collected to resemble them — this information is not available. Second, the heavy reliance on distillation leads to a fundamental misunderstanding of the effectiveness of current methods for training VLMs from scratch. Several key questions remain unanswered, including the significance of each training stage, + +![](images/47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg) +Figure 1: We introduce the largest collection of manually annotated fine-grained activity QA and spatiotemporal captioning data (left panel). Together with this data, we train and release PLM —open and fully reproducible models to facilitate research in vision-language model training (right panel). + +the influence of synthetic data, the data gaps that the research community should prioritize, and which of these gaps are currently being artificially addressed by distillation from proprietary models. + +To better understand these challenges, we develop the Perception Language Model (PLM), a fully open and reproducible model for transparent research in image and video understanding (Fig. 1 right). PLM consists of a vision encoder with a small scale (<8B parameters) LLM decoder. 
We start by an analysis of standard training pipelines with available data, without any proprietary model distillation. We investigate large-scale synthetic data and establish key scaling laws to identify critical data gaps that limit video understanding performance, especially for spatio-temporal reasoning and fine-grained understanding tasks. + +To fill these gaps, we create 2.8M high-quality human-labeled instances of fine-grained video QA and spatio-temporally grounded video captions, see Fig. 1. This release is nearly an order of magnitude larger than the largest existing video datasets of each type [8, 9]. Our model, dataset and benchmark push the boundaries of video understanding, and provide a foundation for reproducible and transparent training and evaluation of VLM research. Across 40 image and video benchmarks, we achieve comparable performance with existing state-of-the-art open-weight models (e.g., InternVL2.5 [10]), without distilling from proprietary models, and greatly outperform fully open models (i.e., Molmo [11]). + +# 2 Related Work + +Vision-Language Models. Building on the strengths of large language models (LLMs), several vision-language models (VLMs) have recently been proposed for image understanding [1, 12, 13, 14, 15, 16, 17, 18, 19], video understanding [20, 21, 22, 23, 24, 25, 26, 27] and joint understanding of both images and videos [10, 28, 29, 30]. These works employ several modeling advancements such as dynamic high resolution inputs [12], adaptive token compression [25, 31], and multimodal positional embeddings [30]. + +Open source, open data VLMs. Training data is a key component in developing powerful VLMs. Many existing approaches train on proprietary data that is not released to the community [32, 33, 34, 35, 36] or on data generated using proprietary models (e.g., GPT4o) [3], effectively distilling the closed models. Doing so make measuring scientific progress difficult and limits research on how to train VLMs ground-up. 
Molmo [11] proposes a class of open-data models, however, they are image VLMs trained on relatively small-scale data, limiting their performance as our experiments will show. + +VLM Benchmarks. Several benchmarks have been proposed to assess the capabilities of VLMs. Popular image benchmarks cover broad perception and reasoning [37, 38, 39, 40, 41, 42, 43, 44, 19, 45, 46, 47, 48] as well as capabilities like image captioning [49, 50, 51], document/diagram understanding [52, 53, 54, 55, 56, 57, 58, 59, 60, 61], mathematical reasoning [62, 63, 64], visual grounding [65, 66] and hallucination [67, 68]. Popular video benchmarks cover video question answering [20, 8, 69, 70, 71, 72, 73, 74, 75, 76, 77, 22, 78, 79, 80], video captioning [81, 82, 83, 84, 85, 86, 87], and hallucination in videos [88, 89]. Many of these video benchmarks remain image-centric — they have questions that can be answered with a few frames. Video-centric reasoning in benchmarks has been relatively neglected with benchmarks proposed only recently for long video understanding [90, 91, 92, 93, 94, 95, 96, 97, 98] and fine-grained, temporal reasoning [99, 100, 101, 102, 103]. We introduce PLM-VideoBench—a benchmark suite aimed at the core, video + +centric capabilities that current benchmarks neglect, namely fine-grained activity understanding and spatio-temporally grounded reasoning. + +# 3 PLM: Overview + +In this section, we overview the model, training stages and training data involved in the development of PLM. Please refer to Fig. 8 for a detailed overview and Appendix A for additional details. + +Model. PLM consists of a vision encoder and language decoder, where a pre-trained Perception Encoder (PE) [104] is connected to the Llama 3 [13] language decoder (1B, 3B, or 8B parameters) with a 2-layer MLP projector. We use PE L/14 for Llama3.2 1B and 3B, and PE G/14 for Llama3.1 8B. 
For image input, PLM incorporates dynamic tiling to support high resolution images for up to 36 tiles of $448^{2}$ resolution, where each tile undergoes $2 \times 2$ average input, PLM uses 32 frames at $448^{2}$ resolution, v dimensions of each video frame. + +
Stage 1 WarmupStage 2 MidtrainingStage 3 SFT
ModalityImageImage + VideoImage + Video
Data1M Synthetic72M Mix19M Mix
TrainingProjectileFullFull
Downsampling-2 × 22 × 2
Tiles/Frames1/-16/1636/32
+ +Table 1: Summary of three training stages to train PLM. See Appendix Table 7 and Table 8 for data splits. +pooling to compress the visual tokens. For video where the same pooling is applied across the spatial + +Data. The data used to train the PLM consists of synthetic and human-annotated samples. Synthetic data enhances the general capabilities of PLM, while human-annotated data broadens these capabilities to encompass more complex tasks. Synthetic data is sourced from a diverse array of image and video datasets, covering fundamental VLM capabilities such as OCR, chart/document/diagram understanding, image/video captioning, and visual question answering. + +We design data engines for each data modality (e.g., natural images, charts, documents, figures, egocentric and exocentric videos) to efficiently scale up, creating $\sim 66.1\mathrm{M}$ samples ( $\S 4$ ). The synthetic data can be noisy, but is available at large scale; on the other hand, human-annotated data provides rich, high-quality supervision for image and video tasks. Here, we combine existing human annotations of diverse image and video sources, with our own collected human-annotated data, specifically geared towards fine-grained video understanding and spatio-temporally grounded reasoning ( $\S 5$ ). + +Training stages. PLM trains in three stages: + +1. **Projector warm-up.** First, we freeze the vision encoder and LLM and only train the vision projector on a small amount of synthetic image data. This warms-up the newly initialized parameters in the projector and improves stability for later stages. We use $1M$ images from SA-1B [105] with the image captions generated by our data engine (§4). +2. Large-scale midtraining with synthetic data. Next, we train PLM on diverse domains of images and videos at scale, using a maximum of 16 tiles for images and 16 frames for videos. PLM sees around 64.7M images and videos with synthetically generated captions and question-answer pairs. 
We employ our data engine to scale up synthetic data generation (see §4). +3. Supervised fine-tuning with human-annotated data. Finally, we train PLM with higher image resolutions and more video frames, using up to 36 tiles for images and 32 frames for videos. In this stage, we tackle more challenging video tasks, including fine-grained QA and spatiotemporally grounded reasoning. + +
SamplesTypeStage
Our Human-annotated (2.87M)
PLM-FGQA2.4MFine-grained3
PLM-STC476.2KR(D)Cap + RTL3
Our Synthetic (66.1M)
Natural Images15.9MCaption1,2,3
Charts & Documents31.9MCaption2,3
Videos Mix17.5MMix.2,3
Ego4D880KCap. + QA2,3
Existing Open Source (6.52M)
Image (92 datasets)5.6MDiverse2,3
Video (27 datasets)920KDiverse2,3
+ +Table 2: Summary of the data mix for training PLM. See Table 9 for the full data blend. +Table 1 shows an overview of our training setup for each stage. Appendix A.1 provides the complete training recipe for each stage, including hyperparameters and data sources. + +# 4 Synthetic Data Generation and Scaling + +The predominant paradigm for VLM training is to generate synthetic annotations as cheap alternatives to human-labeled data [1, 106, 30, 107, 10, 11, 15]. Although seemingly promising to get the best results on benchmarks, the majority of such data shared in the community is derived from proprietary models. This trend makes it hard to decouple scientific progress from proprietary distillation impact. In this section, we explore the efficacy of the current paradigm for VLM training in a transparent manner. We design our data engine entirely from open-source models and scale the synthetic data generation to around 66.1M samples of images and videos. We establish the scaling laws of training from synthetic data on standard VLM tasks, including image, OCR/document, and video tasks. + +# 4.1 Data Engine + +Our data engine is designed to target base capabilities of VLMs for image and video understanding. + +Image Data Engine. We generate short and long captions, as well as question-answer pairs, for natural images and those containing documents, diagrams, and text recognizable by optical character recognition (OCR). We prompt openly accessible Llama 3 [13] model to produce factual, detailed image captions while minimizing hallucinations. To create informative question-answer pairs, we utilize OCR data, captions, and other metadata, which are fed into the prompt of a text-only LLM. + +Video Data Engine. For videos, we first use an off-the-shelf scene detector [108] to extract video clips of approximately 30 seconds duration. 
Then, we extract the keyframes and generate frame-level captions using Llama 3, and video captions using our initial PLM trained with Stage 1 and Stage 3 data as shown in Table 2. We then employ an LLM to refine the frame-level and video captions by incorporating existing video metadata (e.g., action labels, time tags) into a cohesive, detailed video-level caption. Similarly, we generate question-answer pairs from the video-level captions. + +The resulting synthetic data is large-scale and diverse – 66.1M samples carefully curated from a variety of image and video sources including natural images, in-the-wild text, chart, figures, documents, egocentric and exocentric videos. Additional details are in Appendix J. + +# 4.2 Scaling Laws with Synthetic Data + +We examine scaling properties of our synthetic data under controlled setup and establish scaling laws. + +![](images/ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg) +Figure 2: Synthetic Scaling Plots. Relationship between Average Error across benchmarks and training compute (in floating-point operations) for various PLM models. We report average errors across Video QA tasks [75, 72, 90, 8, 70, 71], OCR QA tasks [109, 53, 56, 57], and Natural Images tasks [45, 110, 111, 68, 40, 112]. Model's performance using only human-labeled data subset are reported (No Syst.) as well as the actual power-law fit of each subcategory. + +Setup. To establish power-law relationship between compute and validation-set errors of downstream benchmarks, we vary the scale of synthetic data, language model decoders (1B, 3B, and 8B), vision encoders (300M and 2B), and resolution/number of frames. For each configuration, we train a model with the 66.1M synthetic data from our data engine and 6.5M publicly available human-labeled data, following stage 2 training described in §3. 
At every 2M samples, we evaluate PLM on three categories of downstream benchmarks (VideoQA, OCR QA, Natural QA), constructed from 20 vision-language understanding benchmarks that provide a comprehensive and general evaluation of + +multi-modal large language models. We compute the pareto frontier of these data points and fit a power law relationship: $\mathrm{Err.} = (\beta \times \mathrm{FLOP})^{\alpha}$ and compare the exponents $\alpha$ of the power function as scalability of each setup, where a smaller $\alpha$ implies better scaling. + +Scaling with decoder size. Fig. 2 shows the scaling behavior of PLM across various LLM sizes. We show validation-set errors and training compute on a logarithmic scale, with the black linear line representing the power-law relationship between them. Different colors (green, turquoise, and blue) represent different language model scales (1B, 3B, 8B) while keeping the vision encoder size constant at 300M. As described in the setup section above, we show the power law fit of the pareto frontier in each benchmark category. We also show the results of PLM only trained on 4M human-labeled datasets as baselines, denoted with horizontal lines of each color. The gap from the horizontal line to the data point marks the impact of the synthetic data. Interestingly, all three categories of benchmarks demonstrate clear power-law relationship between compute and average benchmark errors, with the power law exponent $(\alpha)$ of $-0.15, -0.20,$ and $-0.11$ for Video QA, OCR QA, and Natural Image QA, respectively. In Appendix B, we provide more details and extend the analysis to (1) scaling the encoder size, and (2) scaling the image resolution and video frames. + +Limitation of synthetic data. In Fig. 3, we evaluate stage 2 on an extended set of video benchmarks. Specifically, we show the result of 7 challenging video tasks on fine-grained activity understanding [97, 100, 89, 101, 99], temporal grounding [113] and long-video reasoning [92]. 
Unlike generic, high-level understanding (e.g., "what is happening in this video"), the "challenging" tasks require a thorough understanding of video in space and time, and fine-grained semantic details. As shown, the challenging video tasks ("HardQA" in lavender, plum, magenta) show a poor scaling trend $(-0.03)$ compared to general video QA $(-0.15)$ . The stark difference between the two power law fits shows that scaling synthetic data is only effective for established, base tasks. Extending VLMs to + +these more challenging, complex tasks still remain unsolved. Next, we address this challenge with high-quality human-annotated video data, PLM-FGQA and PLM-STC. + +![](images/2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg) +Figure 3: Limitation of synthetic data. Challenging video tasks (HardQA [97, 100, 89, 101, 99, 113, 92]) do not scale well with synthetic data. + +# 5 Human-annotated High Quality Data + +As shown in Fig. 3, the current paradigm with synthetic data has run out of steam. Training from tens of millions of synthetically annotated data hardly improves our model on new, challenging video benchmarks. Beyond standard VLM tasks, these benchmarks focus on advanced capabilities such as fine-grained activity understanding, temporal grounding, and long video understanding. Perhaps, the knowledge that these benchmarks examine is simply not present in the initial training set of our data engine nor in existing human-annotated data. Our community lacks high quality datasets for detailed visual understanding to start from, that covers what, where, when, and how of activities in video. To address this gap, we introduce two large-scale, human-annotated video datasets: + +PLM-FGQA is a fine-grained video QA dataset collected by asking human annotators to watch a short video segment and answer model-generated questions which focus on "what" activities humans perform and "how" they perform these activities. 
Question types include fine-grained recognition (action and object), fine-grained temporal perception (direction of movements, repetition counts, hand pose etc.), and fine-grained spatial understanding (object locations and spatial relationships). We use a multi-stage data engine to first extract video segments with salient actions from untrimmed videos through temporal clustering and shot-detection. Next, we generate questions and answers using either a text-only LLM or an early version of PLM. Finally, we refine the answers by asking humans to verify or replace them if they are incorrect, resulting in a high-quality QA pairs. + +Overall, we collect 2.4M question answer pairs from various open-access video datasets [114, 115, 116, 117, 118, 83] spanning over 780k unique video clips from diverse domains (e.g., cooking, DIY, carpentry, automotive and bike repair) and viewpoints (egocentric and third-person); refer to Fig. 13 for domain statistics. This is nearly 8 times larger than the size of the largest existing human-annotated + +![](images/1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg) +Fine-grained QA (FGQA) + +# Question + +How does the person hold the sandpaper? +Answer: With their right hand, between the right thumb on one side, fingers on the other side. + +# Question + +In which direction is the person moving the sandpaper? Answer + +From the bottom of the baluster to the top in a vertical, oscillating motion. + +![](images/351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg) + +![](images/eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg) + +![](images/23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg) + +# Question + +How many chakli snacks does the person flip? +Answer +The person flips three chakki snacks with a long metal skewer. + +# Question + +Where is the metal skewer located at the beginning? 
Answer +Resting on top of the pan, which is positioned on the left burner of the portable stove. + +![](images/15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg) +Figure 4: Overview PLM-FGQA. Examples of question-answer pairs from PLM-FGQA, focusing on fine-grained human activity understanding. PLM-FGQA is approximately 8 times larger than the largest existing human-annotated video QA dataset and addresses a wide range of fine-grained question types that are scarce in existing video QA datasets, such as ones that cover direction of movement, object states, locations and spatial relations. + +video QA dataset in the community [91]. Moreover, as illustrated by the breakdown of question types1 in Fig. 4 (top-right), PLM-FGQA contains a large number of annotations about fine-grained details that have been largely missing in existing training video QA datasets [119, 69, 71, 76, 20, 120, 121, 122, 123]. Please refer to Table 16 for comparison with existing datasets Table 17 for dataset examples and Appendix G for further details. + +PLM-STC is a spatio-temporal video captioning dataset that offers detailed activity descriptions for each video. It includes timestamps ("when") of each activity and focuses on specific subjects identified by a masklet ("where"). We employ a two-stage annotation process to improve efficiency in collecting PLM-STC. In the first stage, annotators select interesting objects that exhibit significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. For segments where the subject is out of frame, we automatically supplement "out of frame" caption. In the second stage, a separate set of annotators write temporally localized descriptions of the highlighted subject focusing on the changes in action across time in relation to the whole video. 
+ +![](images/5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg) +[0,11] Out of frame. + +[12, 67] The person wearing a jacket is running on a snow covered ground. She stops and turns to look the other person. + +![](images/3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg) +Spatio-temporal Captions (STC) + +[0, 19] The man moves gracefully, using his hand gestures that closely resemble a dance in most of his actions. + +![](images/3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg) + +[20, 31] The person moves from right to left. + +![](images/6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg) + +![](images/0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg) + +![](images/1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg) + +![](images/0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg) + +![](images/7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg) + +![](images/97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg) + +![](images/b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg) + +![](images/9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg) + +![](images/d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg) + +![](images/29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg) + +![](images/89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg) + +[0, 81] A little girl moves back as a beluga whale approaches her face. 
+ +![](images/0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg) + +![](images/bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg) + +![](images/0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg) + +![](images/596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg) + +![](images/00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg) + +![](images/91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg) + +![](images/7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg) + +![](images/08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg) + +![](images/11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg) +Figure 5: Overview of PLM-STC. Examples of spatio-temporally grounded captions from PLM-STC, the first dataset to associate each caption both with a temporal interval as well as a high-fps sequence of segmentation masks of the subject - i.e., masklets (compared to just a temporal interval or a sparse sequence of bounding boxes). + +![](images/17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg) + +![](images/602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg) + +![](images/b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg) + +![](images/2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg) + +![](images/97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg) + +![](images/ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg) + +![](images/86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg) + +![](images/167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg) + +![](images/ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg) + +Overall, we collect 194.2K spatio-temporal captions as the first existing large-scale dense video-region captioning dataset. 
We convert these spatio-temporal captions into three tasks for training: RCap (194.2K): Given the video region and timestamps, the model generates a caption; RTLoc (194.2K): Given the video region and caption, the model localizes the action; and RDCap (122.3K): Given the video region, the model generates dense, localized captions. In total, we construct $194.2\mathrm{K} + 194.2\mathrm{K}$ $+122.3\mathrm{K} = 522.7\mathrm{K}$ samples, of which $476.2\mathrm{K}$ are used for training and the rest for constructing + +PLM-VideoBench. Please refer to Fig. 5 for dataset examples, Table 19 for comparison with existing datasets, Table 20 for dataset statistics and Appendix H for further details. + +# 5.1 PLM-VideoBench + +Our high-quality human-annotated data offers VLMs to train for broader range of capabilities for holistic video understanding. However, existing video benchmarks are not adequately equipped to evaluate these. To this end, we introduce PLM-VideoBench, a novel benchmark focusing on specific activities (what) and their execution details (how) within spatio-temporal contexts (where and when). + +![](images/87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg) +Figure 6: PLM-Video Dataset includes fine-grained video QA (FGQA), open-ended QA in videos recorded using smart glasses (SGQA), Spatio-Temporal Captions (STC) post-processed into video region captioning (RCap), video region temporal localization (RTLoc) and video region dense captioning (RDCap) tasks. + +Fine-Grained Question Answering (FGQA). In this task, a model must answer a multiple-choice question (MCQ) that probes nuanced, fine-grained activity understanding (e.g., painting "vertically" vs. "horizontally" in Fig. 6, first). We report multi-binary accuracy (MBAcc) [99] where each question is split into multiple binary choice questions. Our test set consists of 4,371 question-answer pairs. 
For more information, including statistics on video clips, segment duration, question types, and benchmark construction, see Table 18 and §G.2. + +Smart Glasses Question Answering (SGQA). In this task, a model must answer open-ended questions about activities and objects visible in an egocentric video stream recorded by a smart-glasses device (see Fig. 6, second). The questions are designed to simulate real-world scenarios where a user would ask for assistance from their smart glasses. We manually collect the videos using commercially available smart glasses, providing a completely new, unique dataset that reflects modern use-cases such as online AI video assistance and activity coaching. For evaluation, we use LLM-judge accuracy with an open-access model (Llama3.3 70B). The test set consists of 665 human-annotated question-answer pairs. See Appendix I for more details. + +Video Region Captioning (RCap). In this task, a model must generate a detailed description of an event involving a subject of interest in the video. Given a region masklet and a specified time interval, the model is required to output a caption that accurately describes the event occurring within that interval. Compared to traditional video captioning [125, 83, 84] where the aim is to generate a video-level caption, the goal is to generate a region-level caption tied to a specific subject (e.g., a person, object or animal) (see Fig. 6, third). The test set contains 10,060 human-annotated instances and we report LLM-judge accuracy with Llama3.3 70B. See Appendix C.3 for details. + +Region Temporal Localization (RTLoc). In this task, a model must identify the precise time interval within the video when the specified event takes place for the given subject. Given a video, a region masklet and a text description of the event, the model is required to output the start and end timestamps that correspond to the occurrence of the event (see Fig. 6 fourth). 
Notably, this task is the inverse of RCap — instead of generating the caption, the model receives it as input and generates the corresponding time interval. We filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap. We report average recall@1 over IoU thresholds (0.3, 0.5, 0.7, 0.9). See Appendix C.3 for details. + +Region Dense Video Captioning (RDCap). In this task, a model must generate a detailed description of all events involving a specific subject of interest (e.g., person, animal, or object) in a video. Given a video and a region masklet, the model must produce a sequence of (start, end, caption) tuples that cover the entire duration of the video, including periods when the subject is not visible (see Fig. 6, last). This task is a composition of RTLoc and RCap, requiring the model to produce both temporal windows for events as well as captions directly from the video. The test set contains 2,620 samples and we report the SODA score [126] which uses an LLM judge. See Appendix C.3 for details. + +# 6 Experiments + +We first overview the baselines and evaluation setting (§6.1). We then compare benchmark results of PLMs with the baselines on a broad collection of image (§6.2) and video (§6.3) tasks as well as on our PLM-VideoBench (§6.4). Finally, we provide analyses on data and model ablations (§6.5). + +# 6.1 Setup + +We compare PLMs against the following two classes of baselines: + +- Proprietary models such as GPT-4o [33] (gpt-4o-2024-11-20), Gemini-Pro 1.5 [34] and Gemini-Flash 2.0 [35]. We use API calls to evaluate these models. +- Open-access models such as Molmo-O [11], LLaVA-OneVision [28], Qwen2.5-VL [106] and InternVL2.5 [10] — state-of-the-art open-access models, for which model scale, architecture and inference code are available. We use the official inference code for all models. 
+ +Inference protocol. For mask inputs in PLM-VideoBench, we overlay a colored box on the video frames to specify the regions. We report validation set performance unless specified (in brackets) under the benchmark name. Metrics marked with $\dagger$ use LLM as a judge. Complete implementation details including inference hyper-parameters, task prompts, judge prompts and proprietary model evaluation protocol can be found in Appendix C.4. + +# 6.2 Image Benchmark Results + +We evaluate PLM on a total of 20 image benchmarks. Charts, Diagrams and Documents: answer questions that require parsing images of documents and diagrams; Image Captioning: generate a short/detailed caption, Perception and Reasoning: answer questions of varying difficulty about objects, actions, functional correspondence, multi-view reasoning, spatial layout etc. and Hallucination: evaluate robustness to hallucinated details. More details are in Appendix C.1. + +Table 3 shows our results. Overall, PLM shows strong performance on a wide spectrum of image benchmarks using solely open-access data with a white-box data engine. Additionally, we report + +
ModelCharts, Diagrams and DocumentsPerception and ReasoningHard PerceptionHalluc.
DocVQA (test) acc [53]CharQA acc [54]TextVQA acc [52]InfoQA (test) acc [56]AL2D (n/o mask) acc [55]OCR-Bench acc [57]MMMU (rval) acc [37]VQA2 (rval) acc [111]OK-VQA acc [39]VizWiz acc [40]SEED (image) acc [58]BLINK (multi-image) acc [44]CV-Bench acc [19]RealWorldQA acc [45]VSR acc [127]POPE acc [68]
GPT-4o [33]92.8*85.7*75.380.7*94.2*81070.7*-63.9-77.1*68.0*72.573.978.087.2*
Gemini 1.5 Pro [35]94.084.274.881.0*95.783063.2-63.9-77.859.881.066.376.188.2*
Gemini 2.0 Flash [35]93.084.880.281.094.079269.9*-57.8-77.064.482.371.974.8-
1B scale
Qwen2VL-2B [30]90.1*75.380.365.5*84.6*809*41.1*80.059.767.472.944.4*17.362.6*73.087.2
InternVL2.5-1B [10]84.8*75.9*72.0*56.0*77.8*785*40.9*72.251.547.471.342.442.158.365.490.2
PLM-1B90.778.682.163.084.980734.881.761.059.776.346.873.867.168.888.4
3B scale
Qwen2.5 VL-3B [106]93.9*83.179.3*77.1*90.2797*53.1*80.863.271.973.147.6*54.465.4*78.588.2
InternVL2.5-4B [10]91.6*84.0*79.372.1*90.5*828*52.3*80.964.061.875.650.8*55.964.680.091.0
PLM-3B93.884.384.374.690.983041.284.366.864.078.555.481.472.480.488.7
8B scale
Molmo-7B-O [11]90.8*80.4*80.4*70.0*90.7*-39.3*85.3*-----67.5*--
LLaVA-OV-7B [28]86.780.077.368.890.165648.983.569.663.476.449.475.066.778.189.2
Qwen2.5VL-7B [106]95.7*87.3*84.9*82.6*93.0864*58.6*70.161.073.573.256.4*11.969.880.387.2
InternVL2.5-8B [10]93.0*84.8*79.377.6*92.8*82356.0*80.669.264.377.654.8*53.970.1*80.090.6*
PLM-8B94.685.586.580.992.787046.185.669.667.079.356.081.375.082.889.9
+ +Table 3: Image benchmarks. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature, and the remaining are reproduced using official code. + +
ModelVCap.Video QAFine-grained Video QAT.Loc.Halluc.
DREAM-1K F/F [86]MVBench acc [70]NEX-TQA acc [69]PerceptionTest (test) acc [71]STAR acc [72]Video-MME acc [75]ActivityNet-QA acc [76]EgoSchemas (test) acc [90]TemporalBench MBA acc [99]TOMATO MBO acc [100]MotionBench (dev) acc [101]TempCompass (MCC) acc [102]CG-Bench (clue) acc [97]Charades-STA mOU [113]VideoHallucer overall acc [88]EventHallusion (binary) acc [89]
Proprietary
GPT-4o [33]-64.6*79.1-70.471.9*-72.2*38.5*37.7*55.974.558.3*38.656.491.9*
Gemini 1.5 Pro [35]-60.5*81.665.9-75.0*56.7*71.2*34.732.056.175.650.1*34.256.080.9
Gemini 2.0 Flash [35]-60.781.9--70.3*-71.5*27.632.856.176.947.0*29.860.181.6
1B scale
Qwen2VL-2B [30]26.863.2*76.453.9*67.355.6*38.427.013.125.746.962.342.80.334.959.9
InternVL2.5-1B [10]27.764.874.359.473.050.3*60.755.727.725.045.056.440.90.831.038.9
PLM-1B34.370.180.372.783.749.262.560.418.225.552.264.643.655.249.279.5
3B scale
Qwen2.5 VL-3B [106]20.367.076.866.9*63.061.5*59.264.8*17.223.549.263.045.738.8*45.253.5
InternVL2.5-4B [10]29.271.782.567.977.262.3*64.166.623.727.452.765.252.08.449.666.3
PLM-3B37.474.783.479.384.854.966.266.923.430.960.469.347.257.755.576.5
8B scale
LLaVA-OV-7B [28]28.057.181.058.166.057.760.545.419.527.653.767.841.212.134.761.1
Qwen2.5VL-7B [106]23.369.6*80.070.5*68.165.5*63.765.0*24.524.651.171.7*49.843.6*50.161.1
InternVL2.5-8B [10]28.572.685.568.9*77.664.2*66.166.2*24.329.453.568.3*53.114.357.160.2
PLM-8B35.977.184.182.784.958.367.368.828.333.261.472.746.458.657.777.3
+ +We report Image Grounding task results on RefCOCO/+/g [65] datasets in Appendix Table 14, and show that PLM outperforms both specialist models as well as the VLM baselines in all model scales. + +# 6.3 Video Benchmark Results + +We evaluate PLM on a total of 25 video benchmarks. We divide these into the following categories. Video Captioning: generate a short caption for a video, or a dense description of all events; Short video QA: answer a question about a short video (few seconds to a minute), either by selecting from a list of options, or providing a free-form answer; Long video QA: answer a question as before, about a much longer video (minutes to hours); Fine-grained QA: answer detailed questions about spatial location, motion, temporal information etc.; and Hallucination: evaluate the robustness of video models to hallucinated details about objects and events. + +Table 4 shows video captioning, video QA, fine-grained video QA, and video hallucination results. We achieve strong results on widely adopted benchmarks, despite only using an open-access data mix free from proprietary model artifacts, outperforming both the open-access and proprietary models. + +Further, we achieve competitive performance on the majority of challenging benchmarks, such as EgoSchema (68.8 %), MotionBench (61.4 %), TOMATO (33.2 %), TempCompass (72.7 %), TemporalBench (28.3 %), Charades-STA (58.6 %), and more. All our model scales show strong performance against both proprietary models as well as open-access baselines of the same scale. + +Lastly, we also show that PLMs at all scales greatly outperform existing approaches on captioning tasks and hallucination detection tasks, owing to our focus on detailed, fine-grained spatio-temporal annotations in our human-annotated data collection. + +# 6.4 PLM-VideoBench Results + +We report the results on our proposed benchmark PLM-VideoBench from §5.1 in Table 5. We evaluate our PLM as well as (proprietary and open-access) baselines.
In addition, we provide the human performance for each subtask in the first row. The results show a significant gap between the baselines and PLM. Proprietary baselines and open-source baselines alike perform reasonably on FGQA tasks, though still 6.5 points lower than PLM (61.2 vs 67.7). On SGQA, where the video sources and the question-answer pairs are unseen by all models, PLM performs reasonably well, yet 2.1 points short of the best open-access model (InternVL2.5) and far from the best proprietary model + +Table 4: Video benchmark results. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature and the remaining are reproduced using official code. + +
ModelFGQA MBAccSGQA acc†RDCap SODA†RCap score†RTLoc meanRAvg.
Human perf.90.967.966.653.967.873.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
Open-access
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
+ +Table 5: PLM-VideoBench results. We evaluate PLM against baselines and report breakdowns. We report human performance in the first row. + +(GPT-4o). On spatio-temporal tasks (RDCap, RCap, RTLoc), open-source baselines are unable to perform grounded reasoning and default to repeating the same caption for every time interval. Proprietary models perform reasonably well, yet far from the human performance. In all sub-tasks of PLM-VideoBench, PLM shows competitive performance compared to proprietary and open-access baselines. Results for all model scales are in Appendix D. + +Note that the human performance varies based on the nature of the task and evaluation metrics. For example, FGQA human scores are naturally higher than RCap because the task is structured (select the correct option vs. open-ended) and the metric is objective (accuracy vs. LLM-judge accuracy). + +# 6.5 Ablation Studies + +Setup. We perform an ablation study to assess the importance of each of our proposed data sources, both synthetic and human-annotated. We start with PLM 3B after stage 2 training, and finetune on a 4M short image and video SFT data mix ${}^{2}$ for the data ablation. We evaluate and report average video benchmark performance across five categories — video captioning, short video QA, fine-grained QA, and video hallucination, as well as spatial and temporal tasks, PLM-VideoBench and three image categories — image OCR, image captioning, and image perception. Full details are in Appendix A.3.
PLM-Synth.PLM-STCPLM-FGQATotal AveragePLM-VideoBenchVideo TasksImage Tasks
PLM-FGQAMBaccPLM-SGQAacc†3 metric avg.Fine-Grained QA5 benchmark avg.Video Cap.Dream 1KVideo QA5 benchmark avg.Video Hallu.2 benchmark avg.Spatial&Temp.4 benchmark avg.Image OCR6 benchmark avg.Image Cap.3 benchmark avg.Image Rec.5 benchmark avg.
XXX48.539.734.46.642.224.067.564.950.676.064.363.3
XX54.349.835.914.748.829.973.273.356.184.065.965.5
X57.949.936.242.148.632.373.974.262.983.867.565.0
X56.762.943.215.250.130.474.176.358.383.764.065.6
61.263.644.042.250.234.374.676.364.383.774.265.4
+ +Table 6: Ablation. We show the impact of individual data components in PLM training. For this ablation, we use a reduced SFT datamix consisting of 4M open-access image and video data. Results are aggregated validation-set performance over selected benchmarks in each category of tasks, details in Appendix A.3. + +![](images/458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg) +Figure 7: HardQA improves with PLM data. + +Discussion. First, we observe that stage 2 synthetic data training boosts model performance across the board. Moreover, adding our PLM-STC data further improves a variety of benchmarks, including PLM-STC (+27.4 points), video captioning (+2.4 points), and most importantly, spatial and temporal tasks (+6.8 points). Adding our PLM-FGQA data improves a distinct set of categories for fine-grained activity understanding; PLM-FGQA (+13.1 points), PLM-SGQA (+7.3 points), Fine-grained video tasks (+1.3 points), video hallucination tasks (+3.0 points), and spatial and temporal tasks (+2.2 points). Using our human-annotated data altogether results in the best performance overall. Further, in Fig. 7, we show that our human-annotated data improves upon HardQA [97, 100, 89, 101, 99, 113, 92], effectively addressing the limitations of synthetic data discussed in §4.2. + +# 7 Conclusion + +This work presents Perception Language Model (PLM), a fully-reproducible vision-language model to transparently tackle visual perception tasks without distillation of private black-box models. We trained PLM using data from existing open-access datasets and synthetic samples generated by our data engine. We identified gaps in detailed video understanding capabilities that cannot be filled with synthetic data. In response, we collected 2.8M human labels for fine-grained video question answering and spatio-temporally grounded captioning, and created a new benchmark, PLM-VideoBench, to evaluate these capabilities.
We hope our open dataset, benchmark, and models will foster transparent research in visual perception. + +# Appendix + +# Table of Contents + +A PLM Training Details 12 + +A.1 PLM Training Setting 12 +A.2 PLM Training Datamix 13 +A.3 Ablation Experiment Details 14 + +B Synthetic Scaling Experiments 14 + +C VLM Benchmark Details 16 + +C.1 Image Benchmarks 16 +C.2 Video Benchmarks 17 +C.3 PLM-VideoBench 17 +C.4 Evaluation Protocols 18 + +D Additional PLM-VideoBench Results 19 +E Baseline Implementation Details 19 + +F Additional Results 20 + +F.1 Comparison with LLaMA-3V 20 +F.2 Image Captioning 20 +F.3 Image Grounding 21 +F.4 Long Video Understanding 21 + +G PLM-FGQA: Fine-grained QA 22 + +G.1 Annotation process: Data Engine 22 +G.2 FGQA PLM-VideoBench Construction 27 + +H PLM-STC Details 28 + +H.1 Annotation Process 28 +H.2 PLM-STC Benchmark 30 + +I Smart Glasses Data 30 + +I.1 Data collection and annotation 30 +I.2 SGQA Benchmark 31 + +J Synthetic Data Engine 31 +K Qualitative Results 35 +L Limitations and Future Work 39 +M Broader Impact 39 + +# A PLM Training Details + +![](images/edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg) +Figure 8: The figure provides an overview of the datasets used in the paper. PLM is trained with $47.8M$ synthetic image and $18.4M$ synthetic video, and $2.9M$ human-labeled video samples. Our data enables PLM to perform a variety of tasks, including standard tasks like Image, Multi-image, and Video QA, as well as new video tasks such as Fine-grained QA (FGQA), Region Temporal Localization (RTLoc), Region Captioning (RCap), and Region Detailed Captioning (RDCap). + +In this section, we describe the training details of PLM. In §A.1 we describe exact details of training setting such as hyper-parameters and implementation details. In §A.2 we describe our datamix for both synthetically generated and human-annotated parts. 
+ +# A.1 PLM Training Setting + +For all three stages, we use AdamW optimizer [128] with weight decay of 0.05 and use FSDP [129] with FlashAttention2 [130] for overall implementation based on PyTorch [131]. + +Stage 1 training. In stage 1, we use a subset of SA-1B [105] paired with detailed captions generated by our data engine (§4.1). We use total 1M samples to train PLM with next token prediction loss, with vision encoder and LLM parameters frozen. This stage is commonly known as warm-up stage. We use learning rate $1 \times 10^{-4}$ for all model scale with global batch size of 512 and $448 \times 448$ resolution. We use the Perception Encoder [104] L/14 variant for the 1B and 3B PLM models, and the G/14 variant for the 8B PLM model. + +Stage 2 training. In Stage 2, we train on a total of 72.5M samples. Of these, 66M consist of images and videos with synthetically generated annotations produced by our data engine. The remaining 6.5M samples are a subset of human-annotated images and videos from open-source datasets, which are included in our final datamix described in §A.2. We train with global batch size of 2048, learning rate of $4 \times 10^{-5}$ , weight decay of 0.05 for the full set of parameters (vision encoder, projector, and LLM). For both image and video input, we use $448 \times 448$ resolution for each tile/frame, which effectively generate 1024 vision tokens. We apply $2 \times 2$ spatial average pooling to reduce this to 256. We use dynamic tiling with a thumbnail to support any resolution and aspect ratio, similar to prior work [12], and uniform sampling of video frames after preprocessing the videos to 1 fps. We set the maximum number of tiles/frames to be 16, which results in maximum of $(16 + 1) \times 256 = 4352$ and $16 \times 256 = 4096$ vision tokens respectively for images and videos. We train the model with a sequence length of 6144 allowing a maximum of 2048 tokens for the text modality. + +Stage 3 training. 
In stage 3, we use a total of 19.1M high-quality datamix samples spanning multiple image, video, and text modalities. We describe this datamix in §A.2. In this stage, we use a global batch size of 1024, learning rate of $1 \times 10^{-5}$ for 8B and $4 \times 10^{-5}$ for 1B and 3B PLM models. We + +train the full set of parameters for all scales. Similar to stage 2, we adopt dynamic tiling and uniform frame sampling for up to 36 tiles for image and 32 frames for video, with $2 \times 2$ spatial average pooling, which generates $(36 + 1) \times 256 = 9472$ vision tokens for image and $32 \times 256 = 8192$ vision tokens for video. For all modalities, we use a maximum training sequence length of 11264. + +# A.2 PLM Training Datamix + +Table 9 presents the full data mix used across all training stages apart from our manually collected data in §5. This contains annotations from existing public datasets as well as synthetically generated data (see §4). We filter and include a wide variety of existing datasets spanning across images (captioning, QA, grounding), videos (captioning, QA, temporal localization, region captioning and dense captioning) and text-only datasets to preserve the text-instruction following capabilities of our model. Most importantly, we filter out every dataset that contains annotations generated by proprietary models. Table 7 and Table 8 show the exact number of samples for each dataset in Stage 2 and Stage 3 respectively. The majority of the data in stage 2 is synthetic, with a focus on captioning samples, since they carry the dense information about the image or video. In stage 3, we have one third of the data, mostly focusing on human annotated samples, covering a large variety of tasks.
DatasetNum SamplesTypeDatasetNum SamplesType
Image SyntheticImage Synthetic
PDFAcc (QA) [132]12MQAPDFAcc (QA) [132]2MQA
PDFAcc (Cap) [132]12MCap.ArxivCap [134]1.5MCap./QA
UCSF [133]6MQASA1B [105]800KCap.
ArxivCap [134]1.8MCap./QAObject365 [135]300KCap.
SA1B [105]10MCap.OpenImages [136]300KCap.
Object365 [135]3.5MCap.DocVQA [53]100KQA
OpenImages [136]1.8MCap.InfographicVQA [56]50KQA
DocVQA [53]50KQAPixmoCap [11]500KCap
InfographicVQA [56]20KQAVideo Synthetic
PixmoCap [11]600KCapYT-1B (QA) [137]300KMCQA
Video SyntheticEgo4D (Cap.) [115]180KCap.
YT-1B (Cap.) [137]14MCap.Ego4D (QA) [115]700KQA
YT-1B (QA) [137]3MMCQASpoken Moments [138]449KCap.
Ego4D (Cap.) [115]180KCap.Charades [139]8KCap.
Ego4D (QA) [115]700KQAKinetics710 [121]40KCap.
Spoken Moments [138]449KCap.DiDeMo [140]7.5KCap.
Charades [139]8KCap.Text Synthetic
Kinetics710 [121]40KCap.NaturalReasoning [141]1MQA
DiDeMo [140]7.5KCap.Human Annotated
Text SyntheticImage QA [9]2.8MQA
NaturalReasoning [141]1MQAImage Cap [9]36KQA
Human AnnotatedImage Grnd. [9]1.4MQA
Image QA [9]2.8MQAImage Misc. [9]1.4MQA
Video QA [9]570KQAVideo QA [9]570KQA
Video TL [9]16KTemp. Loc.Video Cap. [9]315KQA
Video Dense Cap. [9]10KDense Cap.Video TL [9]16KTL
Text QA [9]2MMixVideo Dense Cap. [9]10KDCap.
Total72.5MVideo Region Captioning [9]15KCap.
+ +Table 7: PLM Stage 2 training data mix. + +Table 8: PLM Stage 3 training data mix. + +
DatasetSize
DVQA [142]222222
PlotQA [143]157070
MapQA [144]42761
OCRVQA [145]167646
Localized Narratives [146]199998
FigureQA [147]119999
Hateful Memes [148]9713
CLEVR [149]73181
CLEVR v.0 [149]70000
IconQA [150]116514
TextVQA [112]21953
GeomVerse [151]11162
RobuT (wikiqsl) [152]80757
WebSight [153]10000
Visual7W [154]15961
TallyQA [155]100050
Robust (WTO) [152]42495
DaTik [156]47974
CocoQA [157]46287
ChartQA [109]27395
VQAv2 [111]82772
Chart2Text [158]35946
VisText [159]35995
FinQA [160]5276
DocVQA [53]12089
STVQA [161]18684
TAT-QA [162]2199
RenderedText [163]10435
RAVEN [164]31418
IAM [165]7549
A-OKVQA [39]17720
TabMWP [166]45439
CocoQA [157]9009
TextCaps [167]21953
Screen2Words [168]16713
VSR [169]2157
TQA [170]9742
Robust (SQA) [152]12769
VisualMRC [171]3027
ScienceQA [61]9947
VQA-RAD [172]313
InfographicVQA [56]2118
Hitab [173]4995
AI2D [55]4863
Inter-GPS [174]2555
diagram_image_to_text [175]595
MIMIC-IT (CGD) [176]70539
MultiHiert [177]15233
NLVR2 [178]136799
RAVEN (Multi-image) [164]56081
SpotTheDiff [179]19340
+ +
DatasetSize
STAR [72]3032
NeXT-QA [69]3870
VISION [180]9900
FlinstonesSV [181]22341
ImageCoDe [182]16594
VizWiz [40]4900
MIT-States (State Coherence) [183]1900
MIT-States (Prop. Coherence) [183]1900
WebQA [184]9338
Birds-to-Words [185]14281
AESOP [186]6915
RecipeQA (Img. Coherence) [187]8699
CLEVR-Change [188]3885
IEEdit [189]3456
ChartQA [109]45820
DocVQA [53]69562
InfographicVQA [56]32661
TextVQA [112]69170
TextCaps [167]21324
VisualMRC [171]24456
WTQ [190]16885
HME100k [191]74492
chrome_writing [163]8825
OK-VQA [110]27536
GeometrySk [174]4802
VQA-RAD [172]1793
Total2796145
Image Cap.
DatasetSize
DOCCI [192]13362
DCI [193]7599
Altogether [194]15166
Total36127
Image Misc.
DatasetSize
AI2d [55]12413
COCO cap. [49]414113
GQA-Balanced [195]943000
Total1369526
+ +
Grounding
DatasetSize
VisualGenome [66]154792
Flickr Entities [196]296332
DCI (Region Caption) [193]304912
RefCOCO/g+/ [197]212923
VCR [60]855577
Total1398690
Image Synth.
DatasetSize
DocVQA [53]50170
InfographicVQA [56]21660
PDFAcc (Cap.) [132]12024670
PDFAcc (QA) [132]12024670
UCSF [133]5953490
ArxivCap [134]1859680
SAIB [105]9834573
Object365 [135]3484584
OpenImages [136]1740864
PixmoCap [11]584650
Total47579011
Video QA
DatasetSize
EgoQA [119]7813
NExT-QA (instruct) [69]34114
NExT-QA (MCQ) [69]34114
PerceptionTest [71]2403
ActivityNetQA [76]23530
VideoInstruct (human) [20]25803
CLEVERR (MC) [120]42620
CLEVERR (QA) [120]40000
Kinetics710 [121]39949
SVv2 (classification) [122]40000
VdLNN [123]43126
VdLNN (QA) [123]75090
How2QA [8]45731
STAR [72]35297
Memento [198]40060
Memento-MultImage [198]40060
Total569710
Video Cap.
DatasetSize
VATEX (en caption) [84]259910
Charades (caption) [139]11593
ActivityNet (captions) [125]33375
YouCook2 [83]10337
Total315215
+ +
Video Temporal Loc.
DatasetSize
HiREST [199]7919
Charades [139]7566
DiDeMo [140]435
Total15920
Video Region Captioning
DatasetSize
HC-STVG [200]10131
VidLN (UVO subset) [123]5296
Total15427
Video Dense Cap.
DatasetSize
ActivityNet [125]8859
YouCook [83]1039
Total9898
Video Synth.
DatasetSize
Spoken Moments [138]449044
Charades [139]7919
Kinetics710 [121]39949
DiDeMo [140]7566
Ego4D (Cap.) [115]183029
Ego4D (QA) [115]703935
YT-1B (Cap.) [137]14792983
YT-1B (QA) [137]3383670
Total19568095
Text-QA
DatasetSize
no robots [201]9485
MathQA [202]29837
LIMA [203]1030
GSM8k (socratic) [204]7473
GSM8k [204]7473
FLAN [205]156050
Dolly15k [206]15011
Maggie Pro (MT) [207]300000
Maggie Pro [207]300000
Total2056359
+ +Table 9: PLM training datamix. Our mix includes synthetic and manually annotated data across a combination of image data (QA, captioning, OCR, Visual grounding), video data (captioning, grounded captioning, dense captioning, temporal localization) and text-only data. Importantly, all data is publicly accessible, and not generated by proprietary models. + +# A.3 Ablation Experiment Details + +We provide additional details about the ablation experiment in §6.5. We report benchmark average scores across 5 categories, along with the average across all of them. We select a representative set of benchmarks from the full set of image and video benchmarks in §6.2 and §6.3 that report comparable scores so the average results are meaningful. For Video captioning we select Dream 1K and report the LLM-judge score with Llama3.3 70B as judge. for Short Video QA, and Finegrained QA, we select benchmarks that report MCQ accuracy (and exclude open-ended QA). For Hallucination, we include both benchmarks. For Spatial and Temporal tasks, we select BLINK, CVBench, VSR, and Charades-STA. For Image Perception, we choose SEED, MMMU, VQAv2, OK-VQA, and VizWiz. We train the ablation setup of SFT with the exactly matching hyperparameters as our final run; only difference is the size of the SFT datamix. + +# B Synthetic Scaling Experiments + +In this section we provide additional results to the synthetic scaling experiments in §4.2. We report aggregate benchmark accuracies across three categories — Video QA, OCR QA and Image QA — by selecting representative benchmarks from each category. For VideoQA, these are STAR [72], EgoSchema [90], MVBench [70], VideoMME [75] and PerceptionTest [71]; For OCR QA, these are ChartQA [109], DocVQA [53], InfographicsQA [56], TextVQA [112] and OCRBench [57]; and for Natural Image QA, these are RealworldQA [45], OKVQA [110], VQAv2 [111], and VizWiz [40]. + +Scaling with encoder size. After investigating the impact of the LLM decoder in Fig. 
2, we examine the impact of increasing the vision encoder size from 300M (PE Large) to 2B (PE Giant) for each language model scale next. In Fig. 9, we overlay the new power-law with the 2B vision encoder (black dashed) line onto the 300M (red dashed) line. Notably, we find that the larger vision encoder $(300\mathrm{M}\rightarrow 2\mathrm{B})$ leads to greater scaling trend on video QA benchmarks. Quantitatively, the power law + +![](images/6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg) +Figure 9: Scaling with encoder size. Scaling trends of PE-G vs. PE-L vision encoders. Larger encoders scale better in Video QA tasks while similar scaling in OCR and Natural QA is seen. + +![](images/573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg) + +![](images/487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg) + +fit has improved from $-0.15$ to $-0.19$ . The two lines intersect around 8B scale with PE-G, proving that 8B and larger PLM will benefit more with larger vision encoder. We use PE-L for 1B and 3B LLM scale and PE-G for 8B scale by default. + +![](images/e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg) +Figure 10: Scaling with input size. Scaling trends of training with 16 tiles/frames vs. 8 tiles/frames. Higher input size scales better in Video QA and OCR QA tasks while similar trend is seen for Natural QA. + +![](images/cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg) + +![](images/2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg) + +Scaling with input size. In Fig. 10, we show the impact of increasing the input size to VLM through higher image resolution and more video frames. In this setting, each scale of PLM trains with dynamic tiling for image input and uniform sampling for video input with maximum 8 or 16 tiles/frames per sample. In each plot, the average error of PLM trained with 16 tiles/frames are plotted. 
All models use $2 \times 2$ spatial average pooling before input to LLM, and each tile/frame has $448 \times 448$ resolution. Similar to Fig. 2, we show the power law fit with a black dashed line, and compare to 8 tiles/frames training denoted with a red dashed line. Notably, we find that on Video QA and OCR QA benchmarks, PLM shows better scalability when training with a higher input size. This means that with the same FLOP counts at $10^{13}$ , training with 16 frames achieves 2.0 points lower metric error than the 8-frame counterpart (32.2 vs 30.2). Similar trends are observed with OCR QA going from 8 tiles max. to 16 tiles max. Notably, higher resolution did not make a difference for Natural QA tasks. We chose the 16 max-tiles and frames to be our final training setting for stage 2 PLM. + +In Fig. 11, we show the breakdown of the scaling trend shown in §4.2. "H" stands for the human only (i.e., no synthetic) baseline. From the breakdown, the most notable point is the scalability in OCR, Chart, Document QA tasks. In each benchmark, synthetic data makes more than 10 points of improvement on every model scale, compared to "no synthetic" baselines. Moreover, there is no sign of saturation; the performance will most likely improve with more synthetic data. We hypothesize that OCR, Chart, Document QA tasks reduce to a "translation" task — a set of pixels has a one-to-one mapping to text space. The remaining tasks exhibit a clean power-law relationship between metric error and FLOPs. The last plot shows the scaling trend averaged over all benchmarks, which shows a close power-law relationship.
+ +![](images/4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg) + +![](images/fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg) + +![](images/74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg) + +![](images/5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg) + +![](images/ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg) + +![](images/a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg) + +![](images/a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg) + +![](images/a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg) + +![](images/11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg) + +![](images/273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg) + +![](images/c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg) +Power Law Fit + +![](images/8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg) + +![](images/4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg) +Figure 11: Synthetic Scaling Plots. Relationship between Average Error and training compute (in floating-point operations) for various 1B, 3B, 8B PLM with L14 vision encoder. Each plot reports the individual error in VideoMME [75], STAR [72], EgoSchema [90], How2QA [8], MVBench [70], PerceptionTest [71], ChartQA [109], DocVQA [53], InfoVQA [56], OCRBench [57], RealworldQA [45], OKVQA [110], VQAv2 [111], VizWiz [40], and TextVQA [112]. Finally, we report Avg. All, which average over all the metrics. 
+ +![](images/ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg) + +![](images/806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg) + +# C VLM Benchmark Details + +In this section, we provide details about all the image and video benchmarks considered in §6 including composition and evaluation metrics for image benchmarks (§C.1), video benchmarks (§C.2) and our PLM-VideoBench (§C.3. We also describe evaluation protocol for all these benchmarks including inference parameters and prompts (§C.4). Pointers to evaluation code are linked where available. + +# C.1 Image Benchmarks + +Image captioning We evaluate on single image captioning and grounded image captioning benchmarks like COCO [49], nocaps [50] and Flickr [51]. We report CIDEr as the evaluation metric. + +Perception and reasoning We evaluate on broad, general purpose VQA benchmarks like MMMU [37], VQAv2 [111], MMBench [38], OK-VQA [39], VizWiz [40] as well as hard perception benchmarks like BLINK [44], CV-Bench [19], RealWorldQA [45], and VSR [127]. For all MCQ benchmarks, we report accuracy of selecting the correct option. + +Charts, diagrams and documents We evaluate on benchmarks for reasoning over various types of charts, graphs, diagrams, infographics etc. Specifically, DocVQA [53], ChartQA [54], TextVQA [52], InfographicsVQA [56], AI2D [55], OCRBench [57], and SEED [58]. We report accuracy of selecting the correct option. + +Image Hallucination Finally, we evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as HallusionBench [67] and POPE [68]. For HallusionBench we report the $aAcc$ metric (code) which accounts for correctness and consistency using an LLM judge. + +# C.2 Video Benchmarks + +Video captioning We evaluate on short-video captioning benchmarks, namely YouCook2 [83] and VATEX [84] as well as recent detailed video captioning benchmarks — DREAM-1k [86] and AuroraCap-VDC [87]. 
For YouCook2 and VATEX, we report the CIDEr score [208]. For DREAM-1k we report the AutoDQ F1-score (code) and for AuroraCap-VDC we report the VDC accuracy (code) following the authors' proposed metric. + +Short video QA We evaluate on multiple-choice (MCQ) benchmarks such as How2QA [8], NExT-QA [69], PerceptionTest [71], STAR [72], TGIF-QA [73], TVQA [74], Video-MME [75] and TVBench [80]. We report accuracy of selecting the correct option. We also evaluate on open-ended question answering benchmarks (w/o options) such as ActivityNet-QA [76] (code), MMBenchVideo [79] (code) and VCGBench-Diverse [22]. We report LLM-judge scores/accuracies for these benchmarks. For VCGBench-Diverse, we report the average of 5 LLM-judge scores (code). + +Long video QA We evaluate on popular long-video benchmarks such as EgoSchema [90], LVBench [92], LongVideoBench [94] and MLVU [96]. We report accuracy of selecting the correct option. + +Fine-grained video QA We evaluate on benchmarks for fine-grained spatial, temporal and detail reasoning in videos such as TemporalBench [99], TOMATO [100], MotionBench [101], TempCompass [102] and CG-Bench [97]. We report accuracy of selecting the correct option. For TemporalBench, we report the multi-binary accuracy (MBAcc) (code) proposed by the authors to reduce bias in evaluation. + +Hallucination We evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as VideoHallucer [88] and EventHallusion [89]. We report accuracy of selecting the correct option. + +# C.3 PLM-VideoBench + +We evaluate on our suite of benchmarks for fine-grained and spatio-temporal reasoning in videos. These include: + +Fine-grained QA (FGQA) We report multi-binary accuracy (MBAcc) following prior work [99]. In short, this entails presenting the model multiple independent, binary-choice questions about the same video (in our case, three questions) and requiring the model to get all of them correct, to count towards accuracy.
This sets a higher bar for models, and combats bias in multiple-choice question benchmarks that prior work identifies. + +SmartGlasses-QA (SGQA) We report LLM-judge accuracy of the predicted answer compared to the ground truth answer. We follow existing LLM judge prompts from ActivityNetQA (code). The prompt is repeated below for completeness. + +Video Region Captioning (PLM-RCap) We use an LLM-judge to generate the similarity scores between predicted and ground truth captions. The prompt is below. + +**Dense Video Region Captioning (PLM-RDCap)** We adapt the SODA metric [126] from dense video captioning literature for this task. To compute this metric, we use the same LLM-judge from + +above to generate the pairwise similarity scores between predicted and ground truth captions, which is then fed to the standard metric computation routine. + +Region Temporal Localization (PLM-RTLoc) We report standard temporal localization metrics, namely Mean Recall@1, averaged over a range of IoU thresholds [0.3, 0.5, 0.7, 0.9]. + +# C.4 Evaluation Protocols + +Common evaluation protocol. For video benchmark evaluations, we sample 32 frames uniformly from the full video unless otherwise specified. For uniformity and consistency across benchmarks, we implement all LLM-judge evaluations using LLama3.3-70B-Instruct [13], following LLM judge prompts from popular evaluation frameworks [209, 210] where available. Outputs from all models are generated via greedy sampling (temperature 0). + +# SG-QA judge prompt + +You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task: + +# ##INSTRUCTIONS: + +- Focus on the meaningful match between the predicted answer and the correct answer. +- Consider synonyms or paraphrases as valid matches. 
+- Evaluate the correctness of the prediction compared to the answer. + +Please evaluate the following video-based question-answer pair: + +Question: [question] + +Correct Answer: [target] + +Predicted Answer: [candidate] + +Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. For example, your response should look like this: {"pred": "yes", "score": 4.8}. + +# PLM-RCap judge prompt + +Your task is to compare a given pair of captions and provide a single score indicating how correct the pred is compared to GT, on a scale from 0 to 10. Focus on meaning and context, not exact word matches. Penalize missing and incorrect information, with lower scores for more significant errors. High scores require accurate conveyance of all key GT information. Respond with only the score, starting your response with the number and including no additional text. Output format: [score]. + +PLM-VideoBench inference prompts. Table 10 contains example inference prompt examples for each PLM-VideoBench task. Note that some variation exists between instances in the benchmark. For example, for RCap a prompt may be "What is happening to the subject in the region highlighted by the red rectangle ..." instead of "Give a detailed description of the events occurring in the region marked by the red rectangle ..." however they convey the same underlying instruction and information. + +Proprietary models like GPT-4o and Gemini require more careful prompting to ensure that the output formatting is respected. 
For example, we append instructions to prevent model hallucinations (e.g., "You must use these frames to answer the question; do not rely on any external knowledge or commonsense"), to prevent refusals to answer (e.g., "Even if the information in these separate frames is not enough to answer the question, please try your best to guess an answer which you think would be the most possible one based on the question. Do not generate answers such as not possible to determine") and in-context examples to help guide the model towards the correct output format. Model- and benchmark-specific inference prompts will be released along with our code for full reproducibility. + +
TaskPrompt
FGQAQuestion: [question] \n Options: \n (A) [option1] \n (B) [option2] \n Only give the best option.
SGQAThe following question is asked by the camera wearer at the end of the video. Provide a detailed answer even if unsure. Try to answer in around 20-30 words. Now answer the following question based on the video content: [question]
RDCapCreate a dense caption of the subject's actions within the red rectangles, including action frames ids and brief descriptions. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video.
RCapGive a detailed description of the events occurring in the region marked by the red rectangle within frames ([start frame], [end frame]) in this 32 frame video
RTLocGiven the region marked by the red rectangle in the video, please provide the start and end frame of when '[event]' happens. Use the format (start, end), where start and end are frame numbers between 0 and 31 in this 32 frame video.
+ +# D Additional PLM-VideoBench Results + +We present benchmarking results across all model scales (1B, 3B, 8B) in Table 11, to supplement the 8B model results in the main paper (Table 5). Our approach consistently outperforms baselines across all scales, including proprietary models whose model scale is unknown. + +Table 10: PLM-VideoBench task prompts. Items in square brackets are placeholders filled in for each benchmark instance. + +
ModelFGQA MBAccSGQA Acc†RDCap SODA‡RCap score†RTLoc meanR@1Avg
Human perf.90.967.966.653.967.870.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
1B scale
Qwen2VL-2B [30]39.038.50.918.110.829.1
InternVL2-1B [10]35.828.90.317.22.723.8
InternVL2.5-1B [10]42.339.66.723.61.630.8
PLM-1B57.640.950.340.957.749.4
3B scale
Qwen2.5 VL-3B [106]43.745.10.317.213.933.1
InternVL2-4B [10]43.241.70.519.99.630.3
InternVL2.5-4B [10]50.049.24.925.915.435.3
PLM-3B67.138.853.145.058.253.0
8B scale
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
+ +Table 11: PLM-VideoBench results across all model scales to supplement results in Table 5. + +# E Baseline Implementation Details + +We provide baseline-specific implementation details for all models in §6.1 of the main paper. + +Proprietary baselines We evaluate the GPT and Gemini family of models. For GPT-4o, we use the GPT-4o-2024-11-20 checkpoint. We feed 32 uniformly sampled frames regardless of video length, loaded at high image quality setting. For Gemini, we evaluate Gemini-1.5-Pro and Gemini-2.0-Flash. For VQA tasks, we input the video (without audio) which is processed internally at 1 fps. For + +spatio-temporal tasks (RCap, RDCap, and RTLoc) we use the same inputs as for open-source models and GPT-4o. We evaluate these models using API call. + +Open-source models We evaluate InternVL, Qwen, Molmo and Llava-OV models. We follow official implementation and preprocessing pipelines for each. Specifically, we evaluate InternVL2 and InternVL2.5 (code); QwenVL2 and QwenVL2.5 (code); Molmo-O-0924 (code) and Llava-OV (code). For QwenVL, we sample frames at 1 fps from videos. For InternVL2, we use 12 tiles per image as this more closely matches the reported results. + +Human performance baseline. In Table 5, we report human performance on PLM-VideoBench. For each task, we present annotators with the test sets and collect answers for each instance given the standard task prompt. Given the difficulty of RDCap, we reuse our data annotation pipeline in $\S H$ to collect new dense captions independently, rather than providing the standard task instruction. + +# F Additional Results + +# F.1 Comparison with LLaMA-3V + +
ModelAvg.DocVQA (test) acc [53]CharQA (test) acc [54]TextVQA (test) acc [52]InfoQA (test) acc [56]AL2D (two mask) acc [55]MMMU (val) acc [37]VQAV2 (val) acc [111]
LLaMA 3.2V (11B) [13]73.088.483.479.763.691.150.775.2
LLaMA 3.2V (90B) [13]76.690.185.582.367.292.360.378.1
PLM (1B)67.190.778.682.163.084.934.881.7
PLM (3B)74.493.884.384.374.690.941.284.3
PLM (8B)76.294.686.586.580.992.746.185.6
+ +# F.2 Image Captioning + +Table 12: PLM versus LLaMA-3V on Image Benchmarks: Note that we use LLaMA-3V-90B [13] for generating image captions in our synthetic data engine. + +
ModelCOCO (Karpathy) CIDEr [49]nocaps CIDEr [50]Flickr CIDEr [51]
Proprietary
GPT-4o [33]74.476.671.7
Gemini 1.5 Pro [35]70.671.168.2
Gemini 2.0 Flash [35]84.885.066.6
1B scale
Qwen2VL-2B [30]107.1101.286.0
InternVL2.5-1B [10]122.6110.586.1
PLM-1B138.6124.2100.5
3B scale
Qwen2.5 VL-3B [106]101.7105.577.5
InternVL2.5-4B [10]125.4117.187.4
PLM-3B144.9126.598.0
8B scale
LLaVA-OV-7B [28]112.170.755.7
Qwen2.5VL-7B [106]36.832.734.9
InternVL2.5-8B [10]125.8116.796.5
PLM-8B146.7129.9105.6
+ +Table 13: Image Captioning benchmarks. PLM versus proprietary models and open-access baselines of comparable scale on Image Captioning benchmarks. + +# F.3 Image Grounding + +
ModelRefCOCOvalRefCOCO testARefCOCO testBRefCOCO+ valRefCOCO+ testARefCOCO+ testBRefCOCOg valRefCOCOg testAvg.
Specialists
GroundingDINO [211]90.693.288.288.289.075.986.187.086.6
UNINEXT-H [212]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [213]90.693.288.288.289.075.986.187.086.6
1B scale
PLM-1B88.591.584.883.288.676.586.086.485.7
3B scale
Qwen2.5 VL-3B [106]89.191.784.082.488.074.185.285.785.0
PLM-3B93.394.989.589.893.684.290.890.990.9
8B scale
Cube-LLM [214]90.992.687.983.989.277.486.687.287.0
Qwen2VL-7B [30]91.793.687.385.890.579.587.387.887.9
Qwen2.5VL-7B [106]89.191.784.082.488.074.185.285.785.0
InternVL2-8B [10]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [10]90.394.585.985.291.578.886.787.687.6
PLM-8B90.691.885.987.391.381.188.889.288.2
+ +# F.4 Long Video Understanding + +Table 14: Image Grounding results on RefCOCO+/g. PLM performs competitively compared to the baselines across all model scales, and outperforms specialist models for the image grounding task. + +
ModelLong Video QA
LVBench acc [92]Long VideoBench (val) acc [94]MLVU (dev) Marq [96]
Proprietary
GPT-4o [33]37.266.7*67.4
Gemini 1.5 Pro [35]33.1*64.0*69.9
Gemini 2.0 Flash [35]-61.6*69.5
1B scale
Qwen2VL-2B [30]42.047.962.7
InternVL2-1B [10]31.443.3*52.0
InternVL2.5-1B [10]35.347.957.3*
PLM-1B40.052.358.9
3B scale
Qwen2.5 VL-3B [106]43.3*54.2*68.2
InternVL2-4B [10]34.053.0*59.9*
InternVL2.5-4B [10]40.156.368.3*
PLM-3B40.457.965.0
8B scale
LLaVA-OV-7B [28]38.855.764.6
Qwen2VL-7B [30]46.055.869.8*
Qwen2.5VL-7B [106]45.3*56.0*70.2*
InternVL2-8B [10]37.055.464.0*
InternVL2.5-8B [10]43.2*60.0*68.9
PLM-8B44.556.966.4
+ +Table 15: Results on long video understanding tasks. We compare PLM with open-access baselines and proprietary models of comparable scale, and report results over 3 long video QA benchmarks. Cells with * are reported numbers from literature. The remaining are reproduced using official code. + +# G PLM-FGQA: Fine-grained QA + +We present PLM-FGQA Fine-grained QA (FGQA), a video dataset focused on "how" actions are performed, capturing nuanced fine-grained details through specially designed questions and carefully annotated answers. Due to the scarcity of fine-grained video Q&A data, see Table 16, we built a data engine to enable the collection of our 2.4M Q&A dataset, PLM-FGQA. + +
DatasetYear#Q&AsDatasetYear#Q&As
MovieQA20166462STAR202160000
MSRVTT-QA2017243690CLEVRER202382620
TGIF-QA2017165165EgoQA202419000
MSVD-QA201751000PerceptionTest202444146
TVQA2018152545VideoInstruct202425803
ActivityNetQA201958000MoVQA202421953
How2QA202044007CinePile2024303828
Next-QA202152044Sports-QA202594000
PLM-FGQA20252379067
+ +Table 16: Comparison of our PLM-FGQA dataset with existing video-QA datasets. + +# G.1 Annotation process: Data Engine + +Our data engine is built upon the following modules: (1) Temporal Segment Generation, (2) Question Generation, (3) Answer Generation, (4) Human Annotation (answer verification/manual answer annotation), (5) Quality Control, as illustrated in Figure 12. Next, we describe each module in detail, and finally also provide additional details about the extra steps we took for forming the FG-QA component of PLM-VideoBench out of these annotations. + +![](images/74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg) +Figure 12: Data engine used to collect the PLM-FGQA dataset. + +# G.1.1 Temporal Segment Generation + +We source the video data that serves as a basis for our annotations from publicly available datasets. Based on the video sources and the type of existing annotations, we split the videos into three distinct categories. + +Videos with existing ground-truth segment annotations: We directly adopt segments with their human-annotated action annotations from the following datasets: Ego4d Goal-Step[215], Ego4D Moments[115], EgoExo4D [116], HT-Step[216, 217], COIN [117], CrossTask [118], and YouCook2 [83]. All those sources provide video segment boundaries accompanied by some form of textual action descriptions, and are therefore readily usable with the rest of the pipeline. + +Unedited videos of physical activities: For physical activities videos (e.g. basketball, dancing, soccer), actions are usually atomic and short (e.g. dribble, dance move, kick) and therefore rerequire precise temporal localization. To source videos for these scenarios we used data from EgoExo4D [116] that contains temporally well-aligned and precise narrations; we obtained segments of 2-3 seconds centered around narration timings, and used the anchor narrations directly as the action description. 
+ +Raw, untrimmed videos in-the-wild without temporal segment annotations. We source a very large part of our data from untrimmed instructional videos in the large-scale HT100M dataset [114] which we first need to segment before use. The goal is to obtain video clips that contain meaningful, salient actions, and also caption the resulting segments with concise but accurate action descriptions. We describe the automatic segmentation and captioning module in the following. + +The automatic segmentation and captioning pipeline involves the following three stages: + +![](images/9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg) +Figure 13: Distribution of question types (left) and video sources (right) in the FGQA component of PLM-VideoBench. + +![](images/f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg) + +Temporal segment proposal. Given untrimmed long videos, the first step is to identify semantically coherent segments within them. Inspired by prior work on unsupervised action proposal and segmentation, we leverage visual feature clustering to generate temporal segment proposals, and use shot-boundary detection results to further refine the segment boundaries. We extract clip-level visual features[218] using a sliding window with temporal stride of 1 second. We then compute the pairwise similarity between neighborhood features and detect the class-agnostic action boundaries using a boundary detection kernel (similar to those used in literature[219, 220]). Finally, since the detected segments are usually over-segmented, we perform a bottom-up agglomerate clustering approach to group adjacent segments into clusters, using a segment duration prior of 10 seconds. We also leverage shot boundary detection[221] to obtain precise moments of scene changes: we refine the boundaries of the segment proposals by aligning them to the detected shot boundaries when they're sufficiently close ( $\leq 1$ second). 
+ +Segment filtering and ranking. How-to videos often include a lot of content that is irrelevant to the demonstration of the activity at hand, such as the instructor explaining what they are about to do or showcasing tools and ingredients. It is therefore important to detect and filter segments with such uninformative content. To that end we rank candidate segments according to relevance using a series of heuristics and learned models, described below. + +a. Talking head detection. A common mode in instructional videos is instructors talking into the camera, describing objects or explaining actions they're about to take. To detect and remove such segments, we employ an Active Speaker Detection (ASD) pipeline[222], which we run densely on every video and combine resulting talking head tracks, to produce an ASD score for every segment. +b. Hand-object interaction (HOI) detection. The presence of hand-object interaction (HOI) can be a good indicator of visually groundable actions. We leverage the temporal selection strategy[223] to filter out the segment proposals that contain hand-object interaction. We first employ an off-the-shelf robust HOI detector[224] to densely extract HOI regions within a proposed segment. The HOI score is then calculated by measuring the likelihood of hand-object interaction in the segment and the averaged probability of all the detected hands. +c. ASR groundability. HT100M videos contain timestamped ASR captions, which are speech transcriptions of the audio instructions. It is desirable to rank candidate segments based on how likely their ASR content is to their video content. The hypothesis here is that segments containing ASR transcriptions that align well to the video content, are more likely to be visual-information rich. Moreover since the action labeling pipeline (described next) relies on ASR metadata for producing descriptions, higher ASR groundability scores make it likelier to produce good quality segment descriptions. 
For every candidate segment, we compute an ASR-groundability score by computing video-text alignment scores[218] for each ASR caption within the segment and then averaging the ones that are above a threshold (we use 0.5). +d. Relevance classification. The above heuristics work well for the clear-cut cases they are tailored for, but in practice we found that they struggle with more nuanced segments (e.g. instructor fiddling with an object and describing it rather than using it). To improve the detection of those cases, we manually labelled a small amount of segments that passed through the other filters and trained a binary classifier to classify them as "relevant" or "irrelevant"; to that end we trained a simple 2-layer MLP classifier + +on top of temporally pooled video representations with a logistic loss for binary classification. We deployed the trained model to provide a relevance score for all the candidate segments. + +We combined the scores resulting from all the modules described above and determined cutoff thresholds, based on a small manually annotated validation set. In production, we keep all the segments that have relevance scores above those thresholds. + +Segment captioning We follow a two-step process to obtain action labels for each unlabeled segment: In the first step, a collection of off-the-shelf perception models are used to extract individual image-level captions, video-level captions, and object detections from the segment. The output of all perception models is then fed as text into an LLM to generate long, fine-grained captions. At the second step, the detailed captions are fused with the ASR content of the segment, to obtain a consice action description. Specifically, we query an LLM (Llama 3.3 70B [13]) with the following prompt: + +# Segment to action labels prompt + +Detailed description: [fine grained caption] ASR transcription: [asr caption]. 
Given the detailed description above, identify the specific action performed as part of the activity [task name]. Your response must not be the same as the activity [task name] and needs to be a specific substep within the activity [task name]. Please also supply a rationale for your answer. + +The extracted labeled video segments obtained through the above process serve as the foundation for the subsequent Q&A generation. + +# G.1.2 Automatic Question Generation + +We automatically generate questions about the fine-grained details of the way activities are executed in the video. Our questions is generated with a variety of prompts and models which lead to increased question diversity and specificity. In Table 17 we present the question types and sample questions per question type. Here, we summarize how these questions are generated automatically with an ensemble with models and prompts: + +LLM-based action-conditioned question generation Given a segment, its action name (e.g., cut potatoes), a task name (e.g., How to make sweet potato gratin) and optionally other metadata about the segment (for example, recognized objects [?]), we generate questions that can elicit descriptions of fine-grained details by raters with an LLM. We use tailored prompts for generating questions that cover how the activity is executed (tools, object locations, object states, direction of movements, hand pose), and the spatial arrangement of objects. + +# Activity FG question generation prompt + +I am learning how to [action name] while [task name]. Ask me [N] most relevant questions that reveal the details of the way the step is executed in my environment, e.g., (a) part location, (b) types of tools/ingredients used, (c) direction of movements, (d) how are objects held, (e) object states at the beginning of the step, (f) object state at the end of the step. The questions must be answerable by visually observing the activity, without reading instructions or trying out. 
Please indicate the type of question from (a) to (f) for each question asked at the beginning of the question. + +# Spatial FG question generation prompt + +Imagine I have no common sense or understanding of the 3D real world. I am trying to [task name] and am at the step where I am [action name]. There's [object list] when I'm [action name]. Ask me [N] questions about the 3D position of objects, relative location between objects, distance between objects, spatial relationship using prepositions like above, below, next to, etc. that I might want to know. The questions must be answerable by only visually observing me performing activity, without reading instructions or trying out. + +We explicitly encourage the LLM to provide questions that can be answered solely based on the video frames, in contrast to questions that are focused on external knowledge or non-groundable concepts or judging the execution of the step (e.g., avoid questions like is the pan hot enough to add the oil?), what tool is typically used to loosen the axle nut). The rationale for this is to collect as many Q&A pairs that a model cannot answer just based on external knowledge/language prior, but they rather + +require vision perception to be answered. Note that these questions are generated without visual input, hence they are not instance-specific and might not be answerable given the video segment. + +VLM-based instance-specific question generation After collecting a first set of Q&As using the LLM-generated questions, we bootstrap a VLM Question Generator model, which takes as input the video segment, question types and optionally the task name, and generates a set of instance-specific visual questions. 
The VLM Question Generator model is obtained by supervised fine-tuning of PLM with a question generation instruction-tuning dataset which consists of triplets (video, prompt, response), where the prompt includes the instruction to generate questions based on question types and the response includes example questions to be generated for the given video. Due to the lack of such a dataset with fine-grained question, we synthetically generated it by utilizing the Q&A pairs obtained based on the LLM-generated questions. Specifically, for each video segment, we use an LLM to (1) decompose existing Q&A pairs into multiple Q&A pairs, with each new question focusing on one detail of the original answer; (2) tag question types for the generated questions based on an expanded list of question types; and (3) generate a (prompt, response) pair for the segment. This resulted in $\sim 600k$ training instances. + +# VLM Question Generator training sample + +Generate 3 different questions that reveal the fine-grained details of the way the activity is executed. In particular, focus on these question types: fine-grained object locations, hand pose, object/repetition counts, generating at least one question per type. Write each question in a separate line, e.g., Q1. first question. + +Q2. second question. + +ON. N-th question. + +Response: + +Q1. Where are the tomatoes positioned prior to being cut? +Q2. How is the person grasping the tomato with their left hand? +Q3. How many tomatoes did the person use in the segment? + +LLM-based follow-up question generation This final set of questions aims to increase coverage of video details and generate highly fine-grained questions by leveraging the already collected Q&A pairs for each segment and feed them to an LLM that generates "follow-up" questions that are more detailed and challenging than the initial questions. 
+ +# Follow-up question generation prompt + +I have the following information gathered about the video: [list of previous Q&A samples] Utilizing information and details from all the provided Q&A pairs (make sure to specialize questions based on the already corrected answers, e.g., using referring expressions), ask [N] most relevant and interesting, visual questions that we can ask annotators in order to reveal NEW, rich, additional fine-grained details about the video that we don't know yet, in particular about the following question types: 'tools/ingredients', 'object counts', 'repetition counts', 'direction of movement', 'hand pose', 'fine-grained object locations', 'spatial relations', 'initial state/end state', 'action happened before/after', 'clothes wearing', 'body pose', 'main action in the video', 'temporal extent of action', 'sizes'. The questions should be specific and have a specific answer. Avoid generic questions that can be very tedious to answer, e.g., how many objects are there in the scene. Also, do not generate questions that start with "Is ..." and then list options. Prefer open-ended questions, e.g., starting with "How". [... More examples & formatting ...] + +# G.1.3 Automatic Answer Generation + +The next step of the data engine aims to produce correct and comprehensive answers to the generated questions. We obtain automatic answers to the generated questions using a version of PLM that has been fine-tuned with extra privileged information of various forms as input. The privileged information includes textual annotations from the metadata available with the candidate training videos and feature embeddings extracted from off-the-shelf models. Useful textual metadata include the video title, ASR captions or written descriptions, video-level task name (inferred by an LLM using the title and captions), and any existing QAs for that video. 
Off-the-shelf embeddings include frame-level features extracted densely at 1 fps;
Question TypeSample Questions
Action RecognitionWhat is the process being performed on the sandpaper? +What is the action shown?
Action SequenceWhat does the person do after brewing the tea? +What does the person do before marking the vinyl with a pencil?
Counting ProblemsWhat is the quantity of universal down cleaner being poured into the task area? +How many branches does the person cut in total? +How many times does the person spray Greased Lightning onto the ketchup spill?
Movement DirectionIn what direction is the black welding tool pointing while the person is working on the metal joint? +How does the person chop the garlic with the knife?
Object AttributesWhat is the color of the seatpost shown in the video segment? +What is the shape of the tube at the end of the step? +What is the size of the knife being used to chop the spring onions?
Object LocationWhere does the person put the honey bottle away? +Where does the person position the clothes before ironing?
Object RecognitionWhat type of roller and paint are being used? +What does the person place on top of the smooth half of the egg carton? +What was the person initially holding in their left hand?
Object StateHow would you describe the sink at the beginning of the cleaning process? +What is the state of the nematode after mixing it with water and sponge?
OtherAt what point in the video is the person seen holding the wires?
PoseHow are the woman's legs positioned while she is sitting? +How bent is the left elbow during the activity?
Spatial RelationsHow far is the bias tape maker from the right edge of the ironing board? +What is the spatial relationship between the bowls and the Brussels sprouts on the kitchen countertop?
Speed/ForceHow would you describe the consistency of pressure applied during sanding? +How fast does the person initially push the stone?
+ +Table 17: PLM-FGQA question types and sample questions + +for scene classification information. We incorporate the textual annotations directly into language prompts using the following template: + +# Automatic answer generation prompt + +A video is showing a task [video level task name], specifically the part where [ASR caption]. Here is what we already know about the video: [existing question-answer pairs]. Answer this question in detail: [question] + +The off-the-shelf embeddings are incorporated into the PLM input via an additional Perceiver-IO[227] tokenizer, which summarizes the embeddings at the segment level. + +We fine-tune the answer generator on 1M manually annotated QA pairs. After fine-tuning, we deploy the trained answer generator with privileged information access on the unlabelled questions produced in the previous step, to produce automatic answers. + +# G.1.4 Human Annotation + +After obtaining segments and generating questions and automatic answers, we employ human annotators to obtain high-quality answers. Our answer annotations include the following: + +- Human-verified answers: Raters are provided with the model-generated answer and are asked to accept or reject the answer. They can reject questions for being irrelevant or unanswerable, and answers for being factually incorrect or lacking details. Accepted question-answer pairs proceed without changes, while rejected ones are handled differently: + +question-related rejections (irrelevant or unanswerable) are discarded, whereas answer-related rejections (factually incorrect or lacking details) are marked for correction in the next phase. $17.8\%$ of the total training samples are human-verified automatic answers. + +- Human annotated answers: Raters answer the questions from scratch by ensuring to cover all the relevant details within the temporal segment. 
They receive reference information, such as video-level task names and ASR captions, and may use online resources like WikiHow for additional context. Questions that cannot be answered based on the video segment (for example, due to some false premise) are rejected (with an explanation). These manually annotated answers make up $82.2\%$ of the PLM-FGQA training split, and $100\%$ of the evaluation set. + +Quality Control. Data quality is crucial for model success. We followed several strategies to monitor and enhance annotation quality: annotation Certification - we reviewed a small sample of annotations from each rater before they could work in production queues, ensuring that annotators met high-quality standards before advancing to production; golden Examples - annotators were provided with high-quality annotation examples, highlighting common error patterns and offering acceptable answers. targeted and Dual QA - we conducted daily audits, including vendor auditing and our own sampled quality control. In total, $13\%$ of the training set was audited, and $100\%$ of the samples in PLM-VideoBench underwent quality control. + +# G.2 FGQA PLM-VideoBench Construction + +
TrainTest
Sources stats
Total Videos767k3.6k
Unique Source Videos251k1.9k
Average Duration (sec.)9.812.3
Annotations stats
Number of QA Pairs2.4M4.2k
Number Question Types1212
Question Length (avg/max)12/11412.3/56
Answer Length (avg/max)13.3/91114.1/62
Annotation TypeHumanHuman
Open-DomainYesYes
+ +Table 18: Statistics of the PLM-FGQA training and test data. The test split refers to the FGQA module of PLM-VideoBench. + +The FG-QA component of PLM-VideoBench is formed from a held-out portion of PLM-FGQA. We refine this set and transform it into a challenging MCQ-based benchmark by (1) generating MCQs, (2) filtering out samples that can be answered by text-only (blind) LLMs, (3) performing human verification of negatives, and (4) balancing the distribution of question types and domains. The statistics of the dataset are summarized in Table 18. In more detail the steps we followed are: + +MCQ Generation: To transform QAs into challenging MCQs for evaluation, instead of generating random incorrect answers, we prompt LLMs to produce hard negatives that are semantically close to the correct answer. We use the following prompt which was designed to generate distractors that differ from the correct answer by only a single detail. In effect this enables evaluation to assess fine-grained reasoning about object attributes and tool distinctions. + +Filtering Text-Only Answers: To ensure that video-based reasoning is required, we test whether a text-only LLM can answer the question correctly without seeing the video. If a question can be answered correctly from text alone, we remove or modify it to emphasize visual and temporal grounding. + +Human Verification of Negatives: Automatically generated negatives may sometimes be factually true despite being labeled as incorrect. To address this, we perform human verification, where annotators review distractors to confirm that they are both plausible yet definitively incorrect given the video context.MCQs with ambiguous distractors are removed. + +Balancing Question Types: Finally, after the above postprocessing and filtering is done, we rebalance the test set, to make sure that the question type and domain distributions are approximately uniform, by undersampling over-represented question types and domains. 
+ +Note on the evaluation metric. We report the multi-binary accuracy (MBAcc) [99] to evaluate on the FG-QA task. This accuracy is calculated by comparing the correct answer to each distractor individually. Specifically, for each question, we generate a series of binary questions, where the correct answer is compared with one distractor at a time. A prediction is considered correct only if the correct answer is consistently selected across all binary comparisons. We preferred this metric to vanilla MCQ accuracy as it greatly reduces the predictability of automatically-generated MCQs. + +# MCQ generation prompt + +Here is a question and answer pair about a video: + +Q: [question] + +A: [answer] + +You need to transform this into a high-quality multiple-choice question. To do this, first rephrase the given correct answer and then provide n distractor answers. The n incorrect answers should be reasonable and valid responses to the question, but should have a different meaning than the correct answer. You generate an incorrect answer from the correct one by changing a single detail, e.g. an object or verb/action that is relevant to what's being asked. Make the incorrect answers realistic, plausible and similar enough to the correct answer so that it is very difficult for someone to distinguish between them with prior knowledge alone. Finding the correct answer should also require visual information about the scene. The distractor answers should answer the question, but should be incorrect but in a non-obvious way. When changing a single detail to create the distractors, make sure that this detail is the main point of the question. For example, if the question is about the color of an object, then the distractor should change the color of the object and not the kind of object. + +Here are some examples of good distractors (desired) and bad distractors (to be avoided): + +Q: What is the person wearing on their hands while applying varnish? 
+ +A: The person is wearing white gloves on their hands while applying varnish with a brush. + +Good distractors: + +- The person is wearing black gloves on their hands while applying varnish with a brush. Bad distractors: + +- The person is wearing black gloves on their hands while applying paint with a roller. .. More examples & formatting ... + +# H PLM-STC Details + +We present PLM Spatio-Temporal Captions (PLM-STC), a novel dataset aimed at training and evaluating VLMs for spatial-temporal reasoning. We collected pairs of mask tablets for objects in videos, along with their corresponding detailed temporal descriptions. The annotations are collected on top of the SA-V [124] videos, which are diverse and high-quality. We excluded the test set videos from SA-V, to avoid any data cross contamination. Table 20 provides statistics about the dataset, such as number of total samples, training/val/test splits, object types, and time-segment duration. PLM-STC, is not only novel, but also larger and higher quality compared to existing datasets, see Table 19. In Fig. 5 (right), we show an example of our spatio-temporal captions, describing a little girl (highlighted in blue): (frame 0-81): A little girl moves back as beluga whale approaches her face. (frame 82-85): Out of frame. (frame 86-98): She tries to feed the whale. + +We describe the overall annotation process in Appendix H.1, and how we build the three sub-tasks in Appendix H.2. + +# H.1 Annotation Process + +The annotation process is summarized in Figure 14. The annotation process involves three stages: Object Selection and Tracking, Temporal Segmentation and Captioning and Verification and Quality Control. + +
DatasetSpatial TypeYear#VideosRegionsTemp. Seg.Captions?
DAVIS16-RVOS [228]Segmentation20185050-No
DAVIS17-RVOS [229]Segmentation201890205-No
YouCook2-BB [83]BBox2018647-4.3KNo
A2D Sentence [230]Segmentation20183.7K4.8K-No
J-HMDB Sentence [231]Segmentation2018928928-No
ActivityNet Entities [232]BBox201914.3K1.5M52KNo
VidSTG [9]BBox20206.9K44.8K-No
Refer-Youtube-VOS [233]Segmentation20203.9K7.5K-No
HC-STVG [234]BBox202116K16K-No
VLN [123]Mouse Trace202350K43.1K43.1KYes
MeVis [235]Segmentation20232K8.8K-No
PLM-STCSegmentation202545.7K122.3K194.2KYes
+ +Table 19: Spatio-Temporal-Captioning datasets comparison. + +![](images/9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg) +Figure 14: PLM-STC Annotation pipeline. + +# H.1.1 Object Selection and Tracking + +Annotators select interesting objects with significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. We instructed the annotators by defining interesting regions in video footage as those with the presence of significant, dynamic actions performed by subjects, which can be human, animal, or object. These regions involve multiple major actions that evolve over time, rather than static or insignificant actions. We provided annotators with examples of interesting regions, such as one featuring a person making a sandwich, a dog chasing a cat, or a kite getting stuck in a tree. The goal for the annotator is to identify regions with high delta, where the subject performs a sequence of significant activities that change over time, such as a person entering a room, sitting down, and then drinking from a glass. By focusing on these dynamic and evolving actions, annotators can effectively select regions worthy of captioning. Finally, annotators are provided with several examples of good and bad annotations. + +# H.1.2 Temporal Segmentation and Captioning + +Based on the selected mask tablets, another set of annotators provides time segments for each action and fills in the caption within each time segment. The annotators are instructed to focus on capturing major actions, avoiding minor details or unnecessary movements. When writing captions for each segment, they must ensure clarity in describing the subject's movements and directionality. Additionally, the annotators are advised to avoid making assumptions about the subject's actions or adding details not clearly visible, sticking only to what is directly observable in the frame. 
As in the previous task, the annotators are provided with several examples of good and bad annotations to guide their work. + +# H.1.3 Verification and Quality Control + +A final set of annotators manually verifies the tablets and time-segment captions to ensure accuracy and consistency. For mask refinement, we re-run the same pipeline as §H.1.1, while not letting the annotators choose the interesting object, but only refine the quality of the mask. For captioning refinement, the annotators are tasked with three objectives: 1) Redundancy: eliminate any repeating or redundant information to ensure the caption is concise; 2) Accuracy: verify that every word in the caption accurately describes a fact present in the video, correcting or removing any incorrect information; and 3) Actions: add missing major action information to the caption while preserving existing atomic actions, ensuring the caption effectively conveys the key events in the video. + +
AllTrainValTest
Dataset stats
Number of Videos45.2K42.0K8042.3K
Spatio Temporal Caption127.8K---
Temporal Caption198.7K---
Tube's categories
Person104.5K99.6K8612.4K
Animal16.8K13.2K5501.7K
Object/things6.4K4.4K4361.2K
Temporal captions per Tube
1 caption per tube78.9K73.9K8422.4K
2 caption per tube30.9K27.8K5661.7K
3 or more Caption per tube16.38K14.15K4211.2K
Tasks stats
Region Detailed Captioning (RDCap)122.3K117.2K2.5K2.6K
Region Captioning (RCap)194.2K179.5K4.6K10.1K
Region Temporal Localization (RTLoc)192.0K179.5K4.6K7.9K
Table 20: PLM-STC dataset statistics. Note that for RTLoc, we filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video.
For example, if the subject opens the door twice, the event text is guaranteed to be unique (e.g., referring to the first and second time they opened the door) or dropped entirely if ambiguous (e.g., if the text only mentions the action). + +These tasks are designed to both improve and evaluate the model's capabilities, with the same input-output format applied during both training and evaluation. Figure 6 illustrate an examples of the task, including the prompt used to train and evaluate the PLM. + +# I Smart Glasses Data + +# I.1 Data collection and annotation + +We collected the source videos for PLM-SGQA using commercial smart glasses, which enable participants to capture egocentric videos in a hands-free manner. Participants are presented with 14 categories of popular scenarios, such as shopping, cooking, and walking in a neighborhood, and are instructed to ask questions about their surroundings as if interacting with a multi-modal assistant that shares their visual perspective. Specifically, participants are asked to ask questions spontaneously, + +without delay, about the things they see and experience, and to focus on visual queries rather than dynamic information that may change regularly. After recording the videos, participants annotate the segments by marking the start and end points of the video relevant to each question, as well as providing the ground-truth answer. + +# I.2 SGQA Benchmark + +To create the SGQA component of PLM-VideoBench we first filtered the Q&As using an LLM to obtain a shortlist of questions that focus on human activity and also are perception-based rather than based on general knowledge. This means that SGQA focus on questions that require good visual understanding of the scene to be accurately answered. This process yields an evaluation set consisting of 655 Q&As. For the resulting Q&As, we then trimmed the original videos to obtain clips within the temporal boundary that the human wearer/annotator specified. 
As the annotated segments end at the point where the smart-glass wearer asks the question, it is important for all evaluations to specify that the question refers to the end of the video clip - e.g. see the prompt we used for PLM and baselines evaluation in 10. We summarize the statistics of the SGQA test set in Figures 15 and 16. + +
Sources stats
Total Videos663
Average Duration (sec.)29.4
Annotations stats
Number of QA Pairs665
Number Domains14
Question Length (avg/max)9.0 / 52
Answer Length (avg/max)21.6 / 40
Annotation TypeHuman
Open-DomainYes
+ +Figure 15: Statistics of the PLMSGQA test data. + +![](images/deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg) +Figure 16: Domain distribution of video-clips in PLMSGQA. + +# J Synthetic Data Engine + +Our data engine targets base capabilities of VLMs: image captioning, visual question answering, OCR, chart/diagram understanding, and video understanding. We developed different pipelines for images and videos, and includes different levels of metadata to generate captions and QAs. + +Image Captions: We caption high-quality images using Llama 3.1V 90B. An example is shown in Figure 17. We use this pipeline to caption SA1B [105], Object365 [135], and OpenImages [136]. + +OCR QAs: We leverage pre-extracted OCR and use it as input for a LLM (i.e., Llama 3.3 70B) to generate a set of five question-answer pairs. An example is shown in Figure 18. We use this pipeline to generate QAs for PDFAcc [132], and UCSF [133]. + +Image Captioning plus QAs: In cases for which OCR does not provide enough information to create questions (e.g., scientific figures), we further caption the image using Llama 3.1V 90B. Then we pass the caption with auxiliary metadata (e.g., OCR) to a LLM (i.e., Llama 3.3 70B) to generate question-answers pairs. An example is shown in Figure 19). We use this pipeline to generate captions and QAs for ArxivQA [59], DocVQA [53], InfoVQA [56] and Ai2d [55]. + +Video Captioning plus QAs: An image captioner is run on key-frames of the video, as well as a video captioner on the overall video at 1 fps. The result captions are passed to a LLM (i.e., Llama 3.3 70B, or Llama 3 405B) with additional metadata (e.g., video title etc.), so to generate a detailed caption and a multiple-chosen question answers pair. An example is shown in Figure 20). We use this pipeline to generate captions and QAs for YT-1B [236], Ego4d [115] $^{3}$ , DiDeMo [140], Charades [139], and Kinetics710 [121] $^{4}$ . 
+ +# Detailed Caption + +![](images/1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg) + +The image depicts a yellow building with a red door and a sign that reads "YOLCU SALONU" in black letters. The building has a gable roof and a small window on the left side. There are two trees in front of the building, one on each side of the entrance. The ground is wet, and there is snow on the trees and the ground. In the background, there is a train station with a train on the tracks. The sky is gray and overcast. The image appears to be a photograph taken on a cold winter day. The presence of snow and the gray sky suggest that it is a winter scene. The building and the train station in the background suggest that it is a public place, possibly a train station or a bus stop. The sign on the building reads "YOLCU SALONU," which is Turkish for "Passenger Lounge." This suggests that the building is a waiting area for passengers. Overall, the image depicts a quiet and peaceful winter scene, with a focus on the building and the surrounding environment. + +![](images/31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg) +Figure 17: Detailed caption example, and the corresponding pipeline. The captioner (i.e., Llama 3V 90B) is prompted to generate the caption for the provided image. + +# OCR + +![](images/4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg) + +# Question Answers + +Q: How many teams qualify for the finals? +A: The top 3 teams from each section, plus the next two highest teams on points, qualify for the finals, making a total of 5 teams. +Q: How many points has the Buderim team earned? +A: The Buderim team has earned 150 points. +Q: Which team is currently in the lead? +A: The Albert team is currently in the lead with 36 points and a margin of 39. +Q: What is the current margin of the Mooloolaba team? +A: The Mooloolaba team has a current margin of 1. +Q: What is the overall summary of the document? 
+A: The document appears to be a standings table for the Ladies Division Three of a bowling or lawn bowls competition, showing the current standings of various teams, including their win-loss records, points, and margins. + +# Pipeline + +![](images/0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg) +Figure 18: Visual Question Answering pairs and the corresponding pipeline. The OCR text is extracted from the image, and passed to the LLM (i.e., Llama 3.3 70B) to generate QA pairs. + +# OCR + +![](images/8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg) + +SB 1011 10 Fraction of objects 0.2 0.1 0.0 0.01 0.1 1.0 10.0 erg s1 + +# Detailed Caption + +The image depicts a histogram of the distribution of objects, with the x-axis labeled "LSB[1046ergs-1]" and the y-axis labeled "Fraction of objects." The x-axis ranges from 0.01 to 10.0, while the y-axis ranges from 0.0 to 0.2. The histogram is divided into bins of varying widths, with the first bin spanning from 0.01 to 0.1, the second bin spanning from 0.1 to 1.0, and so on. Each bin contains a bar representing the fraction of objects within that range. The bars are shaded in gray, with some bins having multiple bars. A key feature of the histogram is the presence of a peak in the middle bin, which corresponds to an $L_{SB}$ value of around 1.0. This suggests that the majority of objects have an $L_{SB}$ value close to this value. The histogram also shows a tail extending towards higher $L_{SB}$ values, indicating that some objects have significantly higher $L_{SB}$ values than the majority. Overall, the histogram provides a visual representation of the distribution of $L_{SB}$ values among the objects being studied. It allows for easy identification of patterns and trends in the data, such as the peak in the middle bin and the tail towards higher $L_{SB}$ values. 
+ +# Multi-Choice Question Answer (MCQA) + +What is the approximate $L_{SB}$ value at which the majority of objects have their peak? + +Options: +(A) 0.1 +(B) 1.0 +(C) 5.0 +(D) 10.0 +Answer: (B) 1.0. + +# Pipeline + +![](images/e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg) +Figure 19: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. The OCR text is extracted from the image, and the caption is generated by the captioner (i.e., Llama 3V 90B), which are all passed to the LLM (i.e., Llama 3.3 70B) to generate MCQAs. + +![](images/7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg) + +# Metadata + +Title: Lions VS Colts Highlights 2017 Preseason Game + +Description: Comment suggestions for future videos and Enjoy! + +# Frame Caption + +Frame 435: The image shows a man with dreadlocks standing in front of a crowd of people in a stadium. He is wearing a white t-shirt and is surrounded by a group of people standing on the ground. On the left side of the image, there is a table fan, bottles, and other objects placed on a table. In the background, there are people sitting on chairs, stairs, railings, boards with text, lights, and the sky. The text on the boards reads "Indianapolis Colts vs San Francisco 49ers + +Frame 585: The image shows a football game being played on a TV screen, with a group of people on the ground and a few people standing in the background. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions. + +Frame 765: The image shows a group of people playing a game of football on a green field, with white lines marking the boundaries of the field. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions. 
+ +Frame 945: The image shows a football game being played on a TV screen, with people wearing helmets and playing on the ground. At the bottom of the image, there is text and numbers indicating that the game is between the Detroit Lions and the Indianapolis Colts. + +# Video Caption + +Football players wearing helmets, Detroit Lions vs Indianapolis Colts, player running with ball, falls down, touchdown scored. + +# Detailed Caption + +A football player is running with the ball and then falls down, the game is between the Detroit Lions and the Indianapolis Colts, with players wearing blue and white uniforms and helmets, and people sitting in the stadium, a watermark on the image shows the teams playing, one player is laying on the ground next to another player wearing a white and blue jersey and a white helmet, a horseshoe-shaped object is on the grass, text on the image indicates a touchdown, players are wearing helmets and playing on the ground, with watermarks in the bottom left corner of the images, the game involves the Detroit Lions and the Indianapolis Colts, and also shows the Colts playing against the New England Patriots and the Lions playing against the Dallas Cowboys. + +# Multi-Choice Question Answer (MCQA) + +What is the outcome of the football player's action? + +Options: + +(A) the player runs out of bounds +(B) the player scores a touchdown +(C) the player drops the ball +(D) the player falls down +(E) the player gets injured +Answer: (B) the player scores a touchdown. + +# Pipeline + +![](images/6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg) +Figure 20: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. An LLM (i.e., Llama 3.3 70B) with a video caption, frame captions at 1 fps, and additional metadata (e.g., video title etc.), generates a detailed caption and multiple-choices question answers pair. 
+ +# K Qualitative Results + +![](images/1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg) +Figure 21: PLM-8B OCR examples. + +Question: Provide a detailed description of the image. + +Answer: The image is a pink postage stamp featuring a map of the Grenadines of St. Vincent, with a red border and black text. The map shows various locations on the island, including hotels, bays, and other points of interest. + +*Key Features:* + +* *Map:* A pink map of the Grenadines of St. Vincent, with a red border and black text. +* *Locations:* Various locations on the island are marked, including hotels, bays, and other points of interest. +* *Scale:* A scale in miles is provided at the bottom of the map. +* *Denomination:* The denomination "30c" is printed in the bottom-right corner of the stamp. +* *Border:* The stamp has a red border with a serrated edge. + +*Overall Impression:* + +The image appears to be a vintage postage stamp from the Grenadines of St. Vincent, featuring a map of the island and various locations of interest. The stamp has a classic design and is likely from the mid-20th century. + +![](images/ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg) +Figure 22: PLM-8B OCR in the wild examples. + +Question: What does the word say together from the bikers? + +Answer: The word says Indianapolis. + +![](images/00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg) +Figure 23: PLM-8B OCR in the wild examples. + +Question: How tall is the. +kitty character in the image? + +Answer: The kitty character is approximately $100\mathrm{cm}$ tall. + +![](images/0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg) +Figure 24: PLM-8B Natural image QA + +Question: What is inside the oven? + +Answer: There is a baking sheet inside the oven. + +Question: What temperature is the oven set to? + +Answer: The oven is set to 200 degrees. + +Question: What unit is the oven's temperature set to? 
+ +Answer: The oven's temperature is set to Celsius. + +![](images/507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg) +Figure 25: PLM-8B Captioning. + +Question: Describe the image in details + +Answer: A city skyline with a large body of water in the foreground. The city skyline is made up of tall buildings, some of which are skyscrapers. The buildings are densely packed together and vary in height and color. In the foreground, there is a large body of water that appears to be a harbor or bay. There are many boats and ships in the water, some of which are docked at the shore while others are sailing or anchored in the middle of the water. + +![](images/4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg) +RDCap + +Question: Describe the subject's actions within the red rectangles, providing start and end frames ids and a brief description for each action in a dense caption format. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video. + +Answer: Frame [0, 6]: Out of frame + +Frame [6, 15]: A woman is walking towards the camera with a child. She is holding the child's hand with her right hand and carrying a bag in her left hand + +Frame [16, 31]: Out of frame. + +# RCap + +Question: What is happening to the subject in the region highlighted by the red rectangle during frames [6, 15] in this 32 frame video? + +Answer: a woman is walking on a muddy road with a child. she is holding the child's hand with her right hand and adjusting her saree with her left hand while walking + +# RTLoc + +Question: Please provide the start and end frame of when 'an individual standing near a water body is drinking a from a water from a bottle' happens corresponding to the region marked by the blue rectangle. Use the format [start, end] where start and end are frame numbers between 0 and 31 in this 32 frame video. 
Answer: [23, 26] + +# Captioning + +Question: Please describe the video in detail, focusing on actions. + +Answer: This video features a group of people walking around a muddy area with a small bridge in the background. The video is taken outdoors and the weather is sunny. The video starts with a man standing on the left side of the frame, facing the camera. He is wearing a white shirt, a black vest, black trousers and white shoes. He has a scarf around his neck. The camera pans to the right and shows a group of people walking around. The video is taken from a low angle and the camera is handheld. + +The video ends with a man walking towards the left side of the frame. He is wearing a blue shirt, black trousers and brown shoes. He is holding a bottle in his right hand and is drinking from it. + +# Fine-grained QA + +Question: Please explain what the man in blue t-shirt is pointing at in the video. + +Answer: The man in blue t-shirt is pointing at the bridge. + +Figure 26: PLM-8B Video Region Captioning, Grounding and Fine-grained QA. + +Figure 27: PLM-8B Video Captioning +![](images/4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg) +Question: Please give me a details of the video focusing on actions and interactions. +Answer: This video features a close up shot of a hand holding a plush toy mouse with a grey kitten standing on a white chair, reaching out and touching the toy with its paws. In the beginning of the video, the kitten is seen standing on the chair and reaching out with both paws to touch the toy mouse. The kitten then withdraws its paws and sits back on the chair. The hand holding the toy mouse is seen moving the toy closer to the kitten and then away from the kitten. The kitten then reaches out again and touches the toy with its paws. The hand then moves the toy away from the kitten again and the kitten looks to the left of the frame. +This video is set against a white background. 
+ +Figure 28: PLM-8B Video QA +![](images/aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg) +Question: What are the ingredients being thrown into the mixer? +Answer: Carrots, apples, and bananas. + +# L Limitations and Future Work + +Our PLM models achieve strong performance against open-data baselines and proprietary models alike, however there is still room for improvement in both modeling and data. On the model front, we do not experiment extensively with long video modeling components (e.g., token compression, dynamic temporal resolution). As a result, our performance on long video benchmarks [92, 94, 96] is less competitive (see Table F). PLM is compatible with such newer advancements and can be incorporated in future work. + +Additionally, our results are sensitive to the characteristics of the base LLM. We see especially low performance of PLM on benchmarks such as MMMU [37], MME [41] and Video-MME [75] (see Tables 3 and 4), where the strongest baselines often rely on LLMs that are more verbose, but also have a likely much larger language component (see the gap to proprietary models on some benchmarks). We also note that our model performs relatively poorly on our SGQA task (Table 5), targeting a mix of perception and knowledge based questions to smart glasses. Strong chatbot-focused systems like GPT-4o excel at tasks that go beyond core perception. + +On the data front, our mix focuses squarely on visual perception — it does not include for example, multi-step reasoning, robotics or world-knowledge data. Despite these limitations, PLM contributes new capabilities and strong benchmark results, and set a new standard for fully reproducible VLMs. + +# M Broader Impact + +Our work aims to advance open and reproducible research in vision-language modeling by releasing models, data, and benchmarks that support open research. 
By not having any distillation from proprietary models, we hope to improve reproducible and transparent training and evaluation of VLM research. However, like all MLLMs, our Perception Language Model (PLM) may have some risks. Even by carefully selecting datasets and apply several mitigation (CSAM, NSFW, etc.), the model may still contain hidden biases or generate inappropriate or harmful content. We took steps to reduce these risks by teaching the model to refuse answering questions related to bias, harassment, or adult content. We also remove all samples containing any mention of human faces from all the datasets. + +We also annotate and release a large-scale dataset for fine-grained video question answering and spatio-temporal grounding. This release has the potential to significantly advance research in image and video understanding. Making the dataset openly available allows others to reproduce our work and invites broader community involvement. This transparency supports safer and more accountable progress, helping researchers better understand and address potential biases or limitations. + +We believe that by openly sharing our models and data, while actively addressing ethical concerns, our work can contribute positively to vision-language research. + +# References + +[1] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023. +[2] Erfei Cui, Yinan He, Zheng Ma, Zhe Chen, Hao Tian, Weiyun Wang, Kunchang Li, Yi Wang, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, Yali Wang, Limin Wang, Yu Qiao, and Jifeng Dai. Sharegpt-4o: Comprehensive multimodal annotations with gpt-4o, 2024. +[3] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. In European Conference on Computer Vision, pages 370-387. Springer, 2024. 
+[4] Farre Miquel, Marafioti Andres, Tunstall Lewis, von Werra Leandro, Conghui He, Cuenca Pedro, and Wolf Thomas. Finevideo: behind the scenes, 2024. +[5] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data, 2024. +[6] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, Li Yuan, Yu Qiao, Dahua Lin, Feng Zhao, and Jiaqi Wang. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. +[7] Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle-2: Faster inference of language models with dynamic draft trees, 2024b. URL https://arxiv.org/abs/2406.16858, 2024. +[8] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. Hero: Hierarchical encoder for video+ language omni-representation pre-training. arXiv preprint arXiv:2005.00200, 2020. +[9] Zhu Zhang, Zhou Zhao, Yang Zhao, Qi Wang, Huasheng Liu, and Lianli Gao. Where does it exist: Spatio-temporal video grounding for multi-form sentences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10668-10677, 2020. +[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 
+[11] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Jen Dumas, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weis, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. +[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava- next: Improved reasoning,OCR,and world knowledge, January 2024. +[13] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +[14] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Anwen Hu, Haowei Liu, Qi Qian, Ji Zhang, and Fei Huang. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13040–13051, 2024. +[15] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 
+ +[16] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. arXiv preprint arXiv:2204.14198, 2022. +[17] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pretraining for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. +[18] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. +[19] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. +[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023. +[21] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. +[22] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Khan. Videogpt+: Integrating image and video encoders for enhanced video understanding. arXiv preprint arXiv:2406.09418, 2024. 
+[23] Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. +[24] Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, Xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. Kangaroo: A powerful video-language model supporting long-context video input. arXiv preprint arXiv:2408.15542, 2024. +[25] Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, et al. Longvu: Spatiotemporal adaptive compression for long video-language understanding. arXiv preprint arXiv:2410.17434, 2024. +[26] Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2025. +[27] Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024. +[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. +[29] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024. +[30] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
+[31] Rohan Choudhury, Guanglei Zhu, Sihan Liu, Koichiro Niinuma, Kris M Kitani, and László Jeni. Don't look twice: Faster video transformers with run-length tokenization. arXiv preprint arXiv:2411.05222, 2024. +[32] OpenAI. Gpt-4v(ision) system card, 2023. +[33] OpenAI. Gpt-4o system card, 2024. + +[34] Gemini Team Google. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. +[35] Gemini Team Google. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. +[36] Anthropic. The claude 3 model family: Opus, sonnet, haiku. 2024. +[37] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9556-9567, 2024. +[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024. +[39] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge, 2022. +[40] Jeffrey P Bigham, Chandrika Jayant, Hanjie Ji, Greg Little, Andrew Miller, Robert C Miller, Robin Miller, Aubrey Tatarowicz, Brandyn White, Samual White, et al. Vizwiz: nearly real-time answers to visual questions. In Proceedings of the 23nd annual ACM symposium on User interface software and technology, pages 333-342, 2010. +[41] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. 
arXiv preprint arXiv:2306.13394, 2023. +[42] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. +[43] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. +[44] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. In European Conference on Computer Vision, pages 148-166, 2025. +[45] xai. RealworldQA benchmark. https://huggingface.co/datasets/xai-org/RealworldQA, 2024. +[46] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. +[47] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024. +[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. +[49] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. +[50] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. 
Nocaps: Novel object captioning at scale. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8948-8957, 2019.
+[51] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2014.
+[52] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019.
+
+[53] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. Docvqa: A dataset for vqa on document images. In 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2199-2208, 2021.
+[54] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. arXiv preprint arXiv:2407.21038, 2024.
+[55] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016.
+[56] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C. V. Jawahar. Infographicvqa. In 2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 2582-2591, 2022.
+[57] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery of OCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024.
+[58] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023.
+[59] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al.
Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024. +[60] Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. From recognition to cognition: Visual commonsense reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6720-6731, 2019. +[61] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 2507-2521. Curran Associates, Inc., 2022. +[62] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186, 2025. +[63] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. +[64] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024. +[65] Jierun Chen, Fangyun Wei, Jinjing Zhao, Sizhe Song, Bohuai Wu, Zhuoxuan Peng, S-H Gary Chan, and Hongyang Zhang. Revisiting referring expression comprehension evaluation in the era of large multimodal models. arXiv preprint arXiv:2406.16866, 2024. +[66] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. 
Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. +[67] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024. +[68] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. +[69] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. +[70] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. + +[71] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024. +[72] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. Star: A benchmark for situated reasoning in real-world videos. In Thirty-fifth Conference on Neural Information Processing Systems (NeurIPS), 2021. +[73] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. Tgif-qa: Toward spatiotemporal reasoning in visual question answering. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2758–2766, 2017.
+[74] Jie Lei, Licheng Yu, Mohit Bansal, and Tamara L Berg. Tvqa: Localized, compositional video question answering. arXiv preprint arXiv:1809.01696, 2018.
+[75] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024.
+[76] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9127–9134, 2019.
+[77] Munan Ning, Bin Zhu, Yujia Xie, Bin Lin, Jiaxi Cui, Lu Yuan, Dongdong Chen, and Li Yuan. Video-bench: A comprehensive benchmark and toolkit for evaluating video-based large language models. arXiv preprint arXiv:2311.16103, 2023.
+[78] Jianrui Zhang, Mu Cai, and Yong Jae Lee. Vinoground: Scrutinizing lmms over dense temporal reasoning with short videos. arXiv preprint arXiv:2410.02763, 2024.
+[79] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024.
+[80] Daniel Cores, Michael Dorkenwald, Manuel Mucientes, Cees GM Snoek, and Yuki M Asano. Tvbench: Redesigning video-language evaluation. arXiv preprint arXiv:2410.07752, 2024.
+[81] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016.
+[82] David Chen and William B Dolan. Collecting highly parallel data for paraphrase evaluation.
In Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies, pages 190-200, 2011. +[83] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI conference on artificial intelligence, volume 32, 2018. +[84] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4581-4591, 2019. +[85] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. +[86] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024. +[87] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D Manning. Auroracap: Efficient, performant video detailed captioning and a new benchmark. arXiv preprint arXiv:2410.03051, 2024. +[88] Yuxuan Wang, Yueqian Wang, Dongyan Zhao, Cihang Xie, and Zilong Zheng. Videohallucer: Evaluating intrinsic and extrinsic hallucinations in large video-language models. arXiv preprint arXiv:2406.16338, 2024. +[89] Jiacheng Zhang, Yang Jiao, Shaoxiang Chen, Jingjing Chen, and Yu-Gang Jiang. Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597, 2024. + +[90] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2024. 
+[91] Ruchit Rawal, Khalid Saifullah, Miquel Farré, Ronen Basri, David Jacobs, Gowthami Somepalli, and Tom Goldstein. Cinepile: A long video question answering dataset and benchmark. arXiv preprint arXiv:2405.08813, 2024. +[92] Weihan Wang, Zehai He, Wenyi Hong, Yean Cheng, Xiaohan Zhang, Ji Qi, Xiaotao Gu, Shiyu Huang, Bin Xu, Yuxiao Dong, et al. Lvbench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024. +[93] Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Movieqa: Understanding stories in movies through question-answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4631–4640, 2016. +[94] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37:28828-28857, 2025. +[95] Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18221-18232, 2024. +[96] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. +[97] Guo Chen, Yicheng Liu, Yifei Huang, Yuping He, Baoqi Pei, Jilan Xu, Yali Wang, Tong Lu, and Limin Wang. Cg-bench: Clue-grounded question answering benchmark for long video understanding. arXiv preprint arXiv:2412.12075, 2024. +[98] Orr Zohar, Xiaohan Wang, Yann Dubois, Nikhil Mehta, Tong Xiao, Philippe Hansen-Estruch, Licheng Yu, Xiaofang Wang, Felix Juefei-Xu, Ning Zhang, et al. Apollo: An exploration of video understanding in large multimodal models. 
arXiv preprint arXiv:2412.10360, 2024. +[99] Mu Cai, Reuben Tan, Jianrui Zhang, Bocheng Zou, Kai Zhang, Feng Yao, Fangrui Zhu, Jing Gu, Yiwu Zhong, Yuzhang Shang, et al. Temporalbench: Benchmarking fine-grained temporal understanding for multimodal video models. arXiv preprint arXiv:2410.10818, 2024. +[100] Ziyao Shangguan, Chuhan Li, Yuxuan Ding, Yanan Zheng, Yilun Zhao, Tesca Fitzgerald, and Arman Cohan. Tomato: Assessing visual temporal reasoning capabilities in multimodal foundation models. arXiv preprint arXiv:2410.23266, 2024. +[101] Wenyi Hong, Yean Cheng, Zhuoyi Yang, Weihan Wang, Lefan Wang, Xiaotao Gu, Shiyu Huang, Yuxiao Dong, and Jie Tang. Motionbench: Benchmarking and improving fine-grained video motion understanding for vision language models. arXiv preprint arXiv:2501.02955, 2025. +[102] Yuanxin Liu, Shicheng Li, Yi Liu, Yuxiang Wang, Shuhuai Ren, Lei Li, Sishuo Chen, Xu Sun, and Lu Hou. Tempcompass: Do video llms really understand videos? arXiv preprint arXiv:2403.00476, 2024. +[103] Mohammadreza Salehi, Jae Sung Park, Tanush Yadav, Aditya Kusupati, Ranjay Krishna, Yejin Choi, Hannaneh Hajishirzi, and Ali Farhadi. Actionatlas: A videoqa benchmark for domain-specialized action recognition. arXiv preprint arXiv:2410.05774, 2024. +[104] Daniel Bolya, Po-Yao Huang, Peize Sun, Jang Hyun Cho, Andrea Madotto, Chen Wei, Tengyu Ma, Jiale Zhi, Jathushan Rajasegaran, Hanoona Rasheed, et al. Perception encoder: The best visual embeddings are not at the output of the network. arXiv preprint arXiv:2504.13181, 2025. +[105] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. +[106] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. 
arXiv preprint arXiv:2502.13923, 2025. + +[107] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Intervl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. +[108] Brandon Castellano. PySceneDetect. +[109] Ahmed Masry, Do Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Findings of the Association for Computational Linguistics: ACL 2022, pages 2263-2279, Dublin, Ireland, May 2022. Association for Computational Linguistics. +[110] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. +[111] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017. +[112] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019. +[113] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017. +[114] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 
+[115] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonio Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, Miguel Martin, Tushar Nagarajan, Ilija Radosavovic, Santhosh Kumar Ramakrishnan, Fiona Ryan, Jayant Sharma, Michael Wray, Mengmeng Xu, Eric Zhongcong Xu, Chen Zhao, Siddhant Bansal, Dhruv Batra, Vincent Cartillier, Sean Crane, Tien Do, Morrie Doulaty, Akshay Erapalli, Christoph Feichtenhofer, Adriano Fragomeni, Qichen Fu, Abraham Gebreselasie, Cristina Gonzalez, James Hillis, Xuhua Huang, Yifei Huang, Wenqi Jia, Weslie Khoo, Jachym Kolar, Satwik Kottur, Anurag Kumar, Federico Landini, Chao Li, Yanghao Li, Zhenqiang Li, Karttikeya Mangalam, Raghava Modhugu, Jonathan Munro, Tullie Murrell, Takumi Nishiyasu, Will Price, Paola Ruiz Puentes, Merey Ramazanova, Leda Sari, Kiran Somasundaram, Audrey Southerland, Yusuke Sugano, Ruijie Tao, Minh Vo, Yuchen Wang, Xindi Wu, Takuma Yagi, Ziwei Zhao, Yunyi Zhu, Pablo Arbelaez, David Crandall, Dima Damen, Giovanni Maria Farinella, Christian Fuegen, Bernard Ghanem, Vamsi Krishna Ithapu, C. V. Jawahar, Hanbyul Joo, Kris Kitani, Haizhou Li, Richard Newcombe, Aude Oliva, Hyun Soo Park, James M. Rehg, Yoichi Sato, Jianbo Shi, Mike Zheng Shou, Antonio Torralba, Lorenzo Torresani, Mingfei Yan, and Jitendra Malik. Ego4d: Around the world in 3,000 hours of egocentric video. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
+[116] Kristen Grauman, Andrew Westbury, Lorenzo Torresani, Kris Kitani, Jitendra Malik, Triantafyllos Afouras, Kumar Ashutosh, Vijay Baiyya, Siddhant Bansal, Bikram Boote, Eugene Byrne, Zachary Chavis, Joya Chen, Feng Cheng, Fu-Jen Chu, Sean Crane, Avijit Dasgupta, Jing Dong, María Escobar, Cristhian Forigua, Abraham Kahsay Gebreselasie, Sanjay Haresh, Jing Huang, Md Mohaiminul Islam, Suyog Dutt Jain, Rawal Khirodkar, Devansh Kukreja, Kevin J Liang, Jia-Wei Liu, Sagnik Majumder, Yongsen Mao, Miguel Martin, Effrosyni Mavroudi, Tushar Nagarajan, Francesco Ragusa, Santhosh K. Ramakrishnan, Luigi Seminara, Arjun Somayazulu, Yale Song, Shan Su, Zihui Xue, Edward Zhang, Jinxu Zhang, Angela Castillo, Changan Chen, Xinzhu Fu, Ryosuke Furuta, Cristina Gonzalez, Prince Gupta, Jiabo Hu, Yifei Huang, Yiming Huang, Weslie Khoo, Anush Kumar, Robert Kuo, Sach Lakhavani, Miao Liu, Mingjing Luo, Zhengyi Luo, Brighid Meredith, Austin Miller, Oluwatuminu Oguntola, Xiaqing Pan, Penny Peng, Shraman Pramanick, Merey Ramazanova, Fiona Ryan, Wei Shan, Kiran Somasundaram, Chenan Song, Audrey Southerland, Masatoshi Tateno, Huiyu Wang, Yuchen Wang, Takuma Yagi, Mingfei Yan, Xitong Yang, Zecheng Yu, Shengxin Cindy Zha, Chen Zhao, Ziwei Zhao, Zhifan Zhu, Jeff Zhuo, Pablo Arbeláez, Gedas Bertasius, David J. Crandall, Dima Damen, Jakob Julian Engel, Giovanni Maria Farinella, Antonino Furnari, Bernard Ghanem, Judy Hoffman, C. V. Jawahar, Richard A. Newcombe, Hyun Soo Park, James M. Rehg, Yoichi Sato, Manolis Savva, Jianbo Shi, Mike Zheng Shou, and Michael Wray. Ego-exo4d: Understanding skilled human activity from first- and third-person perspectives. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19383-19400, 2023. + +[117] Yansong Tang, Dajun Wang, Zhenyu Xu, Jingjing Liu, Xiaoyong Wang, Xing Gao, Jinhui Tang, and Dong Wu. Coin: A large-scale dataset for comprehensive instructional video analysis. 
In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. +[118] Dimitri Zhukov, Jean-Baptiste Alayrac, Chen Sun, Ivan Laptev, Cordelia Schmid, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. +[119] Thong Thanh Nguyen, Zhiyuan Hu, Xiaobao Wu, Cong-Duy T Nguyen, See-Kiong Ng, and Anh Tuan Luu. Encoding and controlling global semantics for long-form video question answering. arXiv preprint arXiv:2405.19723, 2024. +[120] Kexin Yi, Chuang Gan, Yunzhu Li, Pushmeet Kohli, Jiajun Wu, Antonio Torralba, and Joshua B Tenenbaum. Clevrer: Collision events for video representation and reasoning. arXiv preprint arXiv:1910.01442, 2019. +[121] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. +[122] Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, et al. The" something something" video database for learning and evaluating visual common sense. In Proceedings of the IEEE international conference on computer vision, pages 5842-5850, 2017. +[123] Paul Voigtlaender, Soravit Changpinyo, Jordi Pont-Tuset, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with video localized narratives. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2461-2471, 2023. +[124] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. +[125] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. 
Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. +[126] Soichiro Fujita, Tsutomu Hirao, Hidetakam Kamigaito, Manabu Okumura, and Masaaki Nagata. Soda: Story oriented dense video captioning evaluation framework. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VI 16, pages 517-531. Springer, 2020. +[127] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 11:635-651, 2023. +[128] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. +[129] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. +[130] Tri Dao. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691, 2023. +[131] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Alan Lerer. Automatic differentiation in pytorch, 2017. +[132] Montalvo Pablo and Wightman Ross. PDF association dataset (pdfa), 2024. +[133] Montalvo Pablo and Wightman Ross. Industry documents library (idl), 2024. +[134] Lei Li, Yuqi Wang, Runxin Xu, Peiyi Wang, Xiachong Feng, Lingpeng Kong, and Qi Liu. Multimodal arxiv: A dataset for improving scientific comprehension of large vision-language models. arXiv preprint arXiv:2403.00231, 2024. +[135] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019. + +[136] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, Tom Duerig, and Vittorio Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. IJCV, 2020. +[137] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in neural information processing systems, 34:23634-23651, 2021. +[138] Mathew Monfort, SouYoung Jin, Alexander Liu, David Harwath, Rogerio Feris, James Glass, and Aude Oliva. Spoken moments: Learning joint audio-visual representations from video descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14871–14881, 2021. +[139] Gunnar A Sigurdsson, Gúl Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 510-526. Springer, 2016. +[140] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pages 5803-5812, 2017. +[141] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with $2.8\mathrm{m}$ challenging questions. arXiv preprint arXiv:2502.13124, 2025. +[142] Kushal Kafle, Scott Cohen, Brian Price, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In CVPR, 2018. 
+[143] Nitesh Methani, Pritha Ganguly, Mitesh M. Khapra, and Pratyush Kumar. Plotqa: Reasoning over scientific plots. In The IEEE Winter Conference on Applications of Computer Vision (WACV), March 2020. +[144] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps, 2022. +[145] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 947-952, 2019. +[146] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020. +[147] Samira Ebrahimi Kahou, Vincent Michalski, Adam Atkinson, Akos Kadar, Adam Trischler, and Yoshua Bengio. Figureqa: An annotated figure dataset for visual reasoning, 2018. +[148] Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. The hateful memes challenge: Detecting hate speech in multimodal memes. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 2611-2624. Curran Associates, Inc., 2020. +[149] Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning, 2016. +[150] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. In The 35th Conference on Neural Information Processing Systems (NeurIPS) Track on Datasets and Benchmarks, 2021. +[151] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. 
Geomverse: A systematic evaluation of large models for geometric reasoning, 2023. +[152] Yilun Zhao, Chen Zhao, Linyong Nan, Zhenting Qi, Wenlin Zhang, Xiangru Tang, Boyu Mi, and Dragomir Radev. Robut: A systematic study of table qa robustness against human-annotated adversarial perturbations. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6064–6081, Toronto, Canada, July 2023. Association for Computational Linguistics. + +[153] Hugo Laurençon, Léo Tronchon, and Victor Sanh. Unlocking the conversion of web screenshots into html code with the websight dataset, 2024. +[154] Yuke Zhu, Oliver Groth, Michael Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. In IEEE Conference on Computer Vision and Pattern Recognition, 2016. +[155] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In AAAI, 2019. +[156] Jonas Belouadi, Anne Lauscher, and Steffen Eger. Automatikz: Text-guided synthesis of scientific vector graphics with tikz, 2024. +[157] Mengye Ren, Ryan Kiros, and Richard Zemel. Exploring models and data for image question answering. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015. +[158] Jason Obeid and Enamul Hoque. Chart-to-text: Generating natural language descriptions for charts by adapting the transformer model. In Brian Davis, Yvette Graham, John Kelleher, and Yaji Sripada, editors, Proceedings of the 13th International Conference on Natural Language Generation, pages 138-147, Dublin, Ireland, December 2020. Association for Computational Linguistics. +[159] Benny J. Tang, Angie Boggust, and Arvind Satyanarayan. Vistext: A benchmark for semantically rich chart captioning. 
In The Annual Meeting of the Association for Computational Linguistics (ACL), 2023. +[160] Zhiyu Chen, Wenhu Chen, Charese Smiley, Sameena Shah, Iana Borova, Dylan Langdon, Reema Moussa, Matt Beane, Ting-Hao Huang, Bryan Routledge, and William Yang Wang. Finqa: A dataset of numerical reasoning over financial data. In Marie-Francine Moens, Xuanjing Huang, Lucia Specia, and Scott Wen-tau Yih, editors, Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3697-3711, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics. +[161] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marcal Rusinol, C.V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4290-4300, 2019. +[162] Fengbin Zhu, Wenqiang Lei, Youcheng Huang, Chao Wang, Shuo Zhang, Jiancheng Lv, Fuli Feng, and Tat-Seng Chua. Tat-qa: A question answering benchmark on a hybrid of tabular and textual content in finance. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3277–3287, Online, August 2021. Association for Computational Linguistics. +[163] Chris Wendler. Renderedtext, 2024. +[164] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. Raven: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. +[165] Urs-Viktor Marti and H. Bunke. Theiam-database:An english sentence database for offline handwriting recognition.International Journal on Document Analysis and Recognition,5:39-46,11 2002. +[166] Pan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. 
Dynamic prompt learning via policy gradient for semi-structured mathematical reasoning. In International Conference on Learning Representations (ICLR), 2023. +[167] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension, 2020. +[168] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, UIST '21, page 498-510, New York, NY, USA, 2021. Association for Computing Machinery. +[169] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning, 2023. +[170] Aniruddha Kembhavi, Minjoon Seo, Dustin Schwenk, Jonghyun Choi, Ali Farhadi, and Hannaneh Hajishirzi. Are you smarter than a sixth grader? textbook question answering for multimodal machine comprehension. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5376-5384, 2017. + +[171] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. Visualmrc: Machine reading comprehension on document images. In AAAI, 2021. +[172] Jason Lau, Soumya Gayen, Asma Ben Abacha, and Dina Demner-Fushman. A dataset of clinically generated visual questions and answers about radiology images. Scientific Data, 5:180251, 11 2018. +[173] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, Jian-Guang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio, editors, Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1094-1110, Dublin, Ireland, May 2022. Association for Computational Linguistics. +[174] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. 
Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. In The Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021), 2021. +[175] Diagram image to text dataset, 2023. +[176] Bo Li, Yuanhan Zhang, Liangyu Chen, Jinghao Wang, Fanyi Pu, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Mimic-it: Multi-modal in-context instruction tuning, 2023. +[177] Yilun Zhao, Yunxiang Li, Chenying Li, and Rui Zhang. Multihiertt: Numerical reasoning over multi hierarchical tabular and textual data. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6588-6600, Dublin, Ireland, May 2022. Association for Computational Linguistics. +[178] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Anna Korhonen, David Traum, and Lluis Márquez, editors, Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6418-6428, Florence, Italy, July 2019. Association for Computational Linguistics. +[179] Harsh Jhamtani et al. Learning to describe differences between pairs of similar images. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4024-4034, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. +[180] Haoping Bai, Shancong Mou, Tatiana Likhomanenko, Ramazan Gokberk Cinbis, Oncel Tuzel, Ping Huang, Jiulong Shan, Jianjun Shi, and Meng Cao. Vision datasets: A benchmark for vision-based industrial inspection. arXiv preprint arXiv:2306.07890, 2023. +[181] Tanmay Gupta, Dustin Schwenk, Ali Farhadi, Derek Hoiem, and Aniruddha Kembhavi. Imagine this! 
scripts to compositions to videos. In Proceedings of the European conference on computer vision (ECCV), pages 598-613, 2018. +[182] Benno Krojer, Vaibhav Adlakha, Vibhav Vineet, Yash Goyal, Edoardo Ponti, and Siva Reddy. Image retrieval from contextual descriptions. arXiv preprint arXiv:2203.15867, 2022. +[183] Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1383-1391, 2015. +[184] Yingshan Chang, Mridu Narang, Hisami Suzuki, Guihong Cao, Jianfeng Gao, and Yonatan Bisk. Webqa: Multihop and multimodal qa. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16495-16504, 2022. +[185] Maxwell Forbes, Christine Kaeser-Chen, Piyush Sharma, and Serge Belongie. Neural naturalist: Generating fine-grained image comparisons. arXiv preprint arXiv:1909.04101, 2019. +[186] Hareesh Ravi, Kushal Kafle, Scott Cohen, Jonathan Brandt, and Mubbasir Kapadia. Aesop: Abstract encoding of stories, objects, and pictures. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2052-2063, 2021. +[187] Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Nazli Ikizler-Cinbis. Recipeqa: A challenge dataset for multimodal comprehension of cooking recipes. arXiv preprint arXiv:1809.00812, 2018. +[188] Dong Huk Park, Trevor Darrell, and Anna Rohrbach. Robust change captioning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4624-4633, 2019. + +[189] Rumeysa Bodur, Erhan Gundogdu, Binod Bhattarai, Tae-Kyun Kim, Michael Donoser, and Loris Bazzani. iedit: Localised text-guided image editing with weak supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7426-7435, 2024. +[190] Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. 
In Chengqing Zong and Michael Strube, editors, Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1470–1480, Beijing, China, July 2015. Association for Computational Linguistics. +[191] Ye Yuan, Xiao Liu, Wondimu Dikubab, Hui Liu, Zhilong Ji, Zhongqin Wu, and Xiang Bai. Syntax-aware network for handwritten mathematical expression recognition. arXiv preprint arXiv:2203.01601, 2022. +[192] Yasumasa Onoe, Sunayana Rane, Zachary Berger, Yonatan Bitton, Jaemin Cho, Roopal Garg, Alexander Ku, Zarana Parekh, Jordi Pont-Tuset, Garrett Tanzer, et al. Docci: Descriptions of connected and contrasting images. In European Conference on Computer Vision, pages 291-309. Springer, 2024. +[193] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating clip-style models on dense captions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26700-26709, 2024. +[194] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen-tau Yih, et al. Altogether: Image captioning via re-aligning alt-text. arXiv preprint arXiv:2410.17251, 2024. +[195] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019. +[196] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE international conference on computer vision, pages 2641–2649, 2015. 
+[197] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 787-798, 2014. +[198] Xiyao Wang, Yuhang Zhou, Xiaoyu Liu, Hongjin Lu, Yuancheng Xu, Feihong He, Jaehong Yoon, Taixi Lu, Gedas Bertasius, Mohit Bansal, Huaxiu Yao, and Furong Huang. Mementos: A comprehensive benchmark for multimodal large language model reasoning over image sequences, 2024. +[199] Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23056-23065, 2023. +[200] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021. +[201] Nazneen Rajani, Lewis Tunstall, Edward Beeching, Nathan Lambert, Alexander M. Rush, and Thomas Wolf. No robots. https://huggingface.co/datasets/HuggingFaceH4/no Robots, 2023. +[202] Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel-Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. Mathqa: Towards interpretable math word problem solving with operation-based formalisms. arXiv preprint arXiv:1905.13319, 2019. +[203] Chunting Zhou, Pengfei Liu, Puxin Xu, Srinivasan Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. Lima: Less is more for alignment. Advances in Neural Information Processing Systems, 36:55006-55021, 2023. +[204] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. 
arXiv preprint arXiv:2110.14168, 2021. +[205] Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. Finetuned language models are zero-shot learners. In International Conference on Learning Representations. + +[206] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. +[207] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024. +[208] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575, 2015. +[209] Kaichen Zhang, Bo Li, Peiyuan Zhang, Fanyi Pu, Joshua Adrian Cahyono, Kairui Hu, Shuai Liu, Yuhan Zhang, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Reality check on the evaluation of large multimodal models, 2024. +[210] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multimodality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. +[211] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. +[212] Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Zehuan Yuan, Ping Luo, and Huchuan Lu. Universal instance perception as object discovery and retrieval. In CVPR, 2023. 
+[213] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172, 2023. +[214] Jang Hyun Cho, Boris Ivanovic, Yulong Cao, Edward Schmerling, Yue Wang, Xinshuo Weng, Boyi Li, Yurong You, Philipp Kraehenbuehl, Yan Wang, and Marco Pavone. Language-image models with 3d understanding. In The Thirteenth International Conference on Learning Representations, 2025. +[215] Yale Song, Eugene Byrne, Tushar Nagarajan, Huiyu Wang, Miguel Martin, and Lorenzo Torresani. Ego4d goal-step: Toward hierarchical understanding of procedural activities. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 38863-38886. Curran Associates, Inc., 2023. +[216] Triantafyllos Afouras, Effrosyni Mavroudi, Tushar Nagarajan, Huiyu Wang, and Lorenzo Torresani. HT-step: Aligning instructional articles with how-to videos. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. +[217] Effrosyni Mavroudi, Triantafyllos Afouras, and Lorenzo Torresani. Learning to ground instructional articles in videos through narrations. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 15201-15213, October 2023. +[218] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. +[219] Hyolim Kang, Jinwoo Kim, Taehyun Kim, and Seon Joo Kim. Uboco: Unsupervised boundary contrastive learning for generic event boundary detection. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20041-20050, 2021. +[220] Zexing Du, Xue Wang, Guoqing Zhou, and Qing Wang. 
Fast and unsupervised action boundary detection for action segmentation. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3313-3322, 2022. +[221] PySceneDetect: Video Cut Detection and Analysis Tool, https://github.com/breakthrough/pyscenedetect. +[222] J. S. Chung and A. Zisserman. Out of time: automated lip sync in the wild. In Workshop on Multi-view Lip-reading, ACCV, 2016. + +[223] Zi-Yi Dou, Xitong Yang, Tushar Nagarajan, Huiyu Wang, Jing Huang, Nanyun Peng, Kris Kitani, and Fu-Jen Chu. Unlocking exocentric video-language data for egocentric video representation learning. ArXiv, abs/2408.03567, 2024. +[224] Dandan Shan, Jiaqi Geng, Michelle Shu, and David Fouhey. Understanding human hands in contact at internet scale. In CVPR, 2020. +[225] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In Proceedings of the 37th International Conference on Neural Information Processing Systems, 2023. +[226] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021. +[227] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. Perceiver io: A general architecture for structured inputs & outputs. ICLR, 2022. +[228] F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung. A benchmark dataset and evaluation methodology for video object segmentation. In Computer Vision and Pattern Recognition, 2016. +[229] Sergi Caelles, Jordi Pont-Tuset, Federico Perazzi, Alberto Montes, Kevis-Kokitsi Maninis, and Luc Van Gool. The 2019 davis challenge on vos: Unsupervised multi-object segmentation. arXiv:1905.00737, 2019. 
+[230] Yan Yan, Chenliang Xu, Dawen Cai, and Jason J Corso. Weakly supervised actor-action segmentation via robust multi-task ranking. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1298-1307, 2017. +[231] Ujjal Kr Dutta, Mehrtash Harandi, and Chellu Chandra Sekhar. Unsupervised deep metric learning via orthogonality based probabilistic loss. IEEE Transactions on Artificial Intelligence, 1(1):74-84, 2020. +[232] Luowei Zhou, Yannis Kalantidis, Xinlei Chen, Jason J Corso, and Marcus Rohrbach. Grounded video description. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6578-6587, 2019. +[233] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020. +[234] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021. +[235] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2694-2703, 2023. +[236] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16375-16387, 2022. 
\ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13180/images/00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg b/data/2025/2504_13xxx/2504.13180/images/00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b81cf5e4b64a25895e5e6ebbac92a099ef877009 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c8854a2d2d0b9929efcb0893c2c38b704260109fcad3d3e25dc5e1e7b3318a4 +size 651 diff --git a/data/2025/2504_13xxx/2504.13180/images/00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg b/data/2025/2504_13xxx/2504.13180/images/00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..438acaba8ffd4782b9d0eff4c3bbf878d6a5fab4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdd75bfa489b7e040cf1c1c00ad1c647a401f76b4fd898e644abf492ca6367ac +size 8230 diff --git a/data/2025/2504_13xxx/2504.13180/images/0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg b/data/2025/2504_13xxx/2504.13180/images/0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c868625ddb2f738864474d20412ef9b7f5919ab --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57534921eda56c4d50ba0a45ae7fb6d7d5c294b541f8dc4b470f4791a9fdb93d +size 680 diff --git a/data/2025/2504_13xxx/2504.13180/images/0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg 
b/data/2025/2504_13xxx/2504.13180/images/0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a278869753b1a403993b725cb087b5f7e82ee3c8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:994517be565f24fa6ca37b410f0583544a4c804bb20d43ae004f8a15bfe1fff0 +size 916 diff --git a/data/2025/2504_13xxx/2504.13180/images/08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg b/data/2025/2504_13xxx/2504.13180/images/08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e81fab955cfa55d6e999b81f7cd0e0df5251e06e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b55d296a117f9265778eab93f0cc54c6c77576dc0626ebb740e9e0fa715013a0 +size 643 diff --git a/data/2025/2504_13xxx/2504.13180/images/0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg b/data/2025/2504_13xxx/2504.13180/images/0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb0a2f0d34a32e972e807886a34904cabb4f2905 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b94576d23463d760ea8320031c308b03a09ae067f336df6b04692c58a2eff9e +size 10803 diff --git a/data/2025/2504_13xxx/2504.13180/images/0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg b/data/2025/2504_13xxx/2504.13180/images/0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..61856268fc1a5ab9c34de645d93f46a21b70a490 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62261f6ae964e2cd36846fab8d69977009e944e672fcf7bae3e0da38dab975b5 +size 704 diff --git a/data/2025/2504_13xxx/2504.13180/images/0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg b/data/2025/2504_13xxx/2504.13180/images/0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c78353b6e747d4c964a9ff033e198fe8240164e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21f50fb5b775d58657e341c60bc1f8178cd92ef159d14fc1543dcdaf9ab23f58 +size 6992 diff --git a/data/2025/2504_13xxx/2504.13180/images/0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg b/data/2025/2504_13xxx/2504.13180/images/0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32f3497c05cc15e2d671ac2b106e384734c0f18e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d487a60185c4c03579557b6a3eb1f2b1bc636aca354a0642e08c9569df45c50 +size 817 diff --git a/data/2025/2504_13xxx/2504.13180/images/1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg b/data/2025/2504_13xxx/2504.13180/images/1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..168e1697a6232219eca7f68f647a4c712f9a5e41 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81935b41c9f625565b2304414a18b9693b1da922431be94746b564348b4029c3 +size 670 diff --git a/data/2025/2504_13xxx/2504.13180/images/11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg b/data/2025/2504_13xxx/2504.13180/images/11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6a915bb66c1625c1567f3df72fd3a378b85d7b2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5cb79452955b8a09162b4da7603f905a19d9f3f6cf534ede74d68bbfef57416 +size 823 diff --git a/data/2025/2504_13xxx/2504.13180/images/11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg b/data/2025/2504_13xxx/2504.13180/images/11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74d276760c7b5c9dd97e7d6b007443134c1dc15a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a3ff028a6debdfaaeea0cf113f036364109930122ce7ecd75420438dfc3544b +size 10278 diff --git a/data/2025/2504_13xxx/2504.13180/images/1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg b/data/2025/2504_13xxx/2504.13180/images/1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5df6f40315bd18e964026940e4047dc4eb367784 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f2fd05e86cd8cf676c6c2a3be8bf948530366c45e6c472e6766bc49d8f61fe08 +size 28313 diff --git a/data/2025/2504_13xxx/2504.13180/images/1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg b/data/2025/2504_13xxx/2504.13180/images/1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3b178ae5dc29ac6ebb2eede7343731d5328f0e5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bbefc9dd9b3e676528c6939e503aee2a4dff6f813df5cd95990f42f42d15cf9 +size 39719 diff --git a/data/2025/2504_13xxx/2504.13180/images/15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg b/data/2025/2504_13xxx/2504.13180/images/15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d919457aaa3892e30587de9e2cd9aad72a9b536 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27d870f127f64d0307ab3b31e7fef7fa50bdcc3982f4b022888f10bfcbf5af17 +size 14332 diff --git a/data/2025/2504_13xxx/2504.13180/images/167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg b/data/2025/2504_13xxx/2504.13180/images/167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75e67b3c0a40fb8ac8cc09b791601432468dd634 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36d132727c1895e45d4329e69335f1873c1a84842275ae33b23c736beab0df23 +size 637 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg b/data/2025/2504_13xxx/2504.13180/images/17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3166fb306fe3d0ddb62df7a7aff4374dafe2d674 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef81cb2a1dac0106ad863a90a7e92547d279921c0a6c0ea1274dfbc421147f5 +size 741 diff --git a/data/2025/2504_13xxx/2504.13180/images/1828408ddca6d94bb42085ebace965210f63ef6d5aaefc0c5f8fd1a9b62002e5.jpg b/data/2025/2504_13xxx/2504.13180/images/1828408ddca6d94bb42085ebace965210f63ef6d5aaefc0c5f8fd1a9b62002e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f75a17eadeba1eb4381995a7a48798bdd8449d93 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/1828408ddca6d94bb42085ebace965210f63ef6d5aaefc0c5f8fd1a9b62002e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:040f505a3a6ce660828b85475faf96966b4986f089568f4ef2ce1eb484a4ff2a +size 65918 diff --git a/data/2025/2504_13xxx/2504.13180/images/1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg b/data/2025/2504_13xxx/2504.13180/images/1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2db9442c8081f7c185eacb3296d77bca79c13ac --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06b444d3c038bb9868e931b858f05004303b2bdfbd0a5c572fe1f55a73490dea +size 11093 diff --git a/data/2025/2504_13xxx/2504.13180/images/2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg 
b/data/2025/2504_13xxx/2504.13180/images/2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2a5a992f9a320456cf6a4d11b980d08ceb3e7f7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e353ddf875454566f242a2e807265da3f3c90e9a6e6346dfd63d85bc4db0e50d +size 13730 diff --git a/data/2025/2504_13xxx/2504.13180/images/23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg b/data/2025/2504_13xxx/2504.13180/images/23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b37dc1b0c74a752cfb19c2ad6c5153d1bf767603 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fa0dde536e9fce4ba2f8e83122c48aa0c139946b65317fc0214fee7e11894e2 +size 4238 diff --git a/data/2025/2504_13xxx/2504.13180/images/273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg b/data/2025/2504_13xxx/2504.13180/images/273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3ad706a7fa243f0f261a86eef9eb5d739bd8673 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51eee7857ce90c086b3f1d5c5bad60ce5bc13fae28b602a801558cda289bd515 +size 9486 diff --git a/data/2025/2504_13xxx/2504.13180/images/29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg b/data/2025/2504_13xxx/2504.13180/images/29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d6bb692fe09a4c0dc7a3b21aa3471320f37cfe3f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760a05648a7da9c537686d7f0707d99377936d3879948d103e4228c62a17b983 +size 658 diff --git a/data/2025/2504_13xxx/2504.13180/images/2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg b/data/2025/2504_13xxx/2504.13180/images/2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b85b87694ab20ca9f91d8420771ff9664c26301c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:880ca8699cc4f44d535a71dc8819cc569b45582ef9fd2cdf79afc284c96f6a88 +size 19245 diff --git a/data/2025/2504_13xxx/2504.13180/images/2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg b/data/2025/2504_13xxx/2504.13180/images/2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16bcbfe62942bb934cec5a754b5b7230af1bf13b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7b76e9ecdd7e6e99b46f545b4ab78d6e5f1901601f9ca6d79d46c8e7ff84f97 +size 749 diff --git a/data/2025/2504_13xxx/2504.13180/images/2bcd6ae87dc81ee9d74711aff2bc23d783e6471cd8e696a0f03e4b894bb0b5b6.jpg b/data/2025/2504_13xxx/2504.13180/images/2bcd6ae87dc81ee9d74711aff2bc23d783e6471cd8e696a0f03e4b894bb0b5b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f030abcab56b3850bd3371d00975fe20a67747e7 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/2bcd6ae87dc81ee9d74711aff2bc23d783e6471cd8e696a0f03e4b894bb0b5b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40ea774cd17da64a13e6e90395c4ac49265f2f3f2368d93d4b54032cf7868186 +size 46347 diff --git a/data/2025/2504_13xxx/2504.13180/images/31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg b/data/2025/2504_13xxx/2504.13180/images/31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77283d1ef26ef3e778c435494160549decf05d6a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:487b8d2af785578143bc68cfb2a0cf0bb35e66f1a3fa0c3b3cbcf94f24844364 +size 7328 diff --git a/data/2025/2504_13xxx/2504.13180/images/351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg b/data/2025/2504_13xxx/2504.13180/images/351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3826685cc852ad60a82c59b00179a4cc9ec944a9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba0f26724584c9335a2fd7ead0e19873445bfdd01caec74f91b3f7c6f38da3de +size 4134 diff --git a/data/2025/2504_13xxx/2504.13180/images/3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg b/data/2025/2504_13xxx/2504.13180/images/3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0db02e5cdd20dc3fbe754bdff3da66184ad5c3d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5139795be7e85d10d9aed1ff3917068c9bdd4fc058add619bbc1e3119b91ef79 +size 736 diff --git a/data/2025/2504_13xxx/2504.13180/images/3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg b/data/2025/2504_13xxx/2504.13180/images/3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84071eca6536f8a421639c7518a5e790210bb3ee --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f13f79ef6fd61dbab69ba14dbc894bcac07e6a1332d235f23be3b91058936ac2 +size 10868 diff --git a/data/2025/2504_13xxx/2504.13180/images/458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg b/data/2025/2504_13xxx/2504.13180/images/458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bb312a4dcc98e1a598c7c18817759fa380c8bb7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d4263dbe54d6916e56fcd135c2bdcb7b90f48a819b7b02bc80f9bea1bb333b3 +size 5506 diff --git a/data/2025/2504_13xxx/2504.13180/images/4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg b/data/2025/2504_13xxx/2504.13180/images/4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3d4b7f2c8f20c5f6029d884cc35f8710833b4b4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:024b247cbed723697722ad59b31e067c3d340948e51a7c6654b44cd2a3cab757 +size 16284 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg b/data/2025/2504_13xxx/2504.13180/images/47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d6c5660e629c6c3f942be5f7eed390c2f5da2b4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:229480a4a664fd080fbc031ac3a407e4fcc0a25d0aa37b1902fd59ffe538d259 +size 47965 diff --git a/data/2025/2504_13xxx/2504.13180/images/487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg b/data/2025/2504_13xxx/2504.13180/images/487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ae162ee8d4e9f263f3b9e99a23bee0e47ddb7e9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fd696661103a2aff34751a64b1d39c926f6902cc65b71a6d19b41a5095f8748 +size 14017 diff --git a/data/2025/2504_13xxx/2504.13180/images/48e62c91a4f9cb332ecf9e55e6ff53d1d47295a99bc55f91839966cf6ebf9686.jpg b/data/2025/2504_13xxx/2504.13180/images/48e62c91a4f9cb332ecf9e55e6ff53d1d47295a99bc55f91839966cf6ebf9686.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ead739342dc83dc75557c0fef9345fa8b62a9e6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/48e62c91a4f9cb332ecf9e55e6ff53d1d47295a99bc55f91839966cf6ebf9686.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c8447e4aeaf05147fd73e5f49ee4b59ff76d8963259f913171c8e1b06cbc0f1 +size 217211 diff --git a/data/2025/2504_13xxx/2504.13180/images/4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg 
b/data/2025/2504_13xxx/2504.13180/images/4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1a8d4ec30cf71ce641bdff67a2a6c12085c511c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:036fee83655e3f39b058d7ee3c69d4aee933ee3774f4154b0f52f8726c0a829c +size 11776 diff --git a/data/2025/2504_13xxx/2504.13180/images/4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg b/data/2025/2504_13xxx/2504.13180/images/4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cfc8eb5319d8af7d64cc7945286317f7c7dcf9c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffe4a6fcc26608287d90ac094b13b34fedcc2147b2e2acee6d53b431e6696b0f +size 10367 diff --git a/data/2025/2504_13xxx/2504.13180/images/4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg b/data/2025/2504_13xxx/2504.13180/images/4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54a5a30b3a30889196338a4615af456046efd020 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f199c8694b891c96882221f2f0080006cecd913f6b2cc34cb0cfce06f9e27ab6 +size 97248 diff --git a/data/2025/2504_13xxx/2504.13180/images/4d825ae476a074dbec45eef2c486a29543783462afb9b5ea512cff79f913689b.jpg b/data/2025/2504_13xxx/2504.13180/images/4d825ae476a074dbec45eef2c486a29543783462afb9b5ea512cff79f913689b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..38f13c20f568d09b86af78d7fa02b9ddf91be154 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4d825ae476a074dbec45eef2c486a29543783462afb9b5ea512cff79f913689b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3b6e0c43c494d8e6cddda4f25480b5316da5d21ef5f9fe26e49b4a864a12db7 +size 63637 diff --git a/data/2025/2504_13xxx/2504.13180/images/4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg b/data/2025/2504_13xxx/2504.13180/images/4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb3bb97229805c2561948a6dcfaebaf1aac59a22 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdc3f2fb188f24a2f2ff6748d0e52cb5d73d1780d8e4778e213749ef07493930 +size 99018 diff --git a/data/2025/2504_13xxx/2504.13180/images/507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg b/data/2025/2504_13xxx/2504.13180/images/507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4adb17f356b800e9b8e0e5edf084c877da9d1bd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:229ff33e7674021b2445c762ba68e3ee18854833cf51a951f05c4d92e5d3bef6 +size 41471 diff --git a/data/2025/2504_13xxx/2504.13180/images/516c83e5d6908944dc93b5e074a65757fe20d9b8f53f088e3a30c9295209e87e.jpg b/data/2025/2504_13xxx/2504.13180/images/516c83e5d6908944dc93b5e074a65757fe20d9b8f53f088e3a30c9295209e87e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02d45ed9509c3394d2ab71abfe9a9e2049cbd81b --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/516c83e5d6908944dc93b5e074a65757fe20d9b8f53f088e3a30c9295209e87e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dde23949bf2ef31176678d2a46e0fea45ea63ba55af9d5e9d9302335f05d53f +size 43200 diff --git a/data/2025/2504_13xxx/2504.13180/images/52ea6fb7910ba74270ecfac5de8092c609865cd4806fe73566a5fb07f843d9bf.jpg b/data/2025/2504_13xxx/2504.13180/images/52ea6fb7910ba74270ecfac5de8092c609865cd4806fe73566a5fb07f843d9bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c1080cbb7930494d48ef3215ab0998cce1db838 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/52ea6fb7910ba74270ecfac5de8092c609865cd4806fe73566a5fb07f843d9bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00a424e3d1f44f11178ab303525eeaf3c87b2e06ab3fd31c37a5680ae7eaf85d +size 120588 diff --git a/data/2025/2504_13xxx/2504.13180/images/573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg b/data/2025/2504_13xxx/2504.13180/images/573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4121e5852dec19800b250160ea29242f94defbe --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef65b860f705baf81b10c4577767387424e0948c2ee6a6755a92e9ea6a03fc4e +size 15053 diff --git a/data/2025/2504_13xxx/2504.13180/images/596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg b/data/2025/2504_13xxx/2504.13180/images/596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d281efdf80081a5bcc82ca3d79c280bcc104add --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6df5c715f72f2e9f77ce0e87083961549b6ebf5b88b31507e203bfddb40d6f0d +size 846 diff --git a/data/2025/2504_13xxx/2504.13180/images/5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg b/data/2025/2504_13xxx/2504.13180/images/5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4282d06d91086bb26af3e3ed3137c7e8772e345 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98669f4fd10e4c7d05ae99b3a5f2398e0afd7623bee2e1dae1b12de683d80e95 +size 9189 diff --git a/data/2025/2504_13xxx/2504.13180/images/5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg b/data/2025/2504_13xxx/2504.13180/images/5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29574961b8e8ae6ee571671416f8508fef703416 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6e74390d91da154127a3cc83bb012ae7d89351bd946666772506683fc242c7 +size 8516 diff --git a/data/2025/2504_13xxx/2504.13180/images/602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg b/data/2025/2504_13xxx/2504.13180/images/602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e237f494bde77877c5e8ec0aa8a7d0f85611e5bb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af00e0bab2e4020a17ce5eabe91725c259739fb1aceeaa1d124a4499ab1f9e86 +size 820 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg b/data/2025/2504_13xxx/2504.13180/images/6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..478b41c8241cc26b4cb7a2dc50f26b96cd6dcc90 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:744e5a400919d9293b5980a66d65ee794b40c778f31189b56920c6d560d2cb3c +size 11504 diff --git a/data/2025/2504_13xxx/2504.13180/images/62e5bca9a4bdf80bc199e8421fe81eaf8f45fa81c53e4ab87f0912b80879803e.jpg b/data/2025/2504_13xxx/2504.13180/images/62e5bca9a4bdf80bc199e8421fe81eaf8f45fa81c53e4ab87f0912b80879803e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fbe96400eda529f6ad75a7b2697feee50b62f21c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/62e5bca9a4bdf80bc199e8421fe81eaf8f45fa81c53e4ab87f0912b80879803e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:512178673cb8c9006346953df2a37b8f1e7fdc67e4fba2a0c1ee21ab0b779ac5 +size 123091 diff --git a/data/2025/2504_13xxx/2504.13180/images/6462db27df34dca5202d51ff30b4e6d53ccde5c5c7a6e1bbdb6382312ad5451a.jpg b/data/2025/2504_13xxx/2504.13180/images/6462db27df34dca5202d51ff30b4e6d53ccde5c5c7a6e1bbdb6382312ad5451a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb8e4a645ea8d72f013acc59aa2fb9a53e5ed464 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/6462db27df34dca5202d51ff30b4e6d53ccde5c5c7a6e1bbdb6382312ad5451a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:878a524015c04fd655f9f575332a9d93861dc6fe1df9debddd8428e9e04b64e5 +size 135564 diff --git a/data/2025/2504_13xxx/2504.13180/images/6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg 
b/data/2025/2504_13xxx/2504.13180/images/6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efa624380156a5bb808addfa5938e3fe82639aec --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8529a1af9b38eb5b0c8b865ae7e94ec9bc85138609566d6cda63893f33d37944 +size 21636 diff --git a/data/2025/2504_13xxx/2504.13180/images/6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg b/data/2025/2504_13xxx/2504.13180/images/6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9eb770d72bb56101fab0aa0dd4b3937e05bcbb28 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:496e847d113b7d519b8944e8fc06ba5bb72fe67e31847fd48102faa684f22e1a +size 19484 diff --git a/data/2025/2504_13xxx/2504.13180/images/701e00966d2585d20f52c85fd4a84ee611176d8dbb3ce8ee2398f029906d406d.jpg b/data/2025/2504_13xxx/2504.13180/images/701e00966d2585d20f52c85fd4a84ee611176d8dbb3ce8ee2398f029906d406d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0c090ca7a57ec5327e92f57f3553d7f7e19f70c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/701e00966d2585d20f52c85fd4a84ee611176d8dbb3ce8ee2398f029906d406d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:061114963328b6a572da9b7418aa9f4b5c26c8b42b111376eeeb5718e7a26d11 +size 39287 diff --git a/data/2025/2504_13xxx/2504.13180/images/74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg b/data/2025/2504_13xxx/2504.13180/images/74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..74ba88b53f06d5d321d4739d8a69604330e68d94 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d96f4d8d36c4098bda479b1cef4c847bff3e59445cd399ac707f22a6ec6ef51 +size 9006 diff --git a/data/2025/2504_13xxx/2504.13180/images/74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg b/data/2025/2504_13xxx/2504.13180/images/74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46c0641f2619331537ad6b937fcd9aef901f86d6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef31f59c3d310f2932907b496dd47115d6d9a12115e8c2c89bb960679af2827b +size 20654 diff --git a/data/2025/2504_13xxx/2504.13180/images/7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg b/data/2025/2504_13xxx/2504.13180/images/7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15897c9662239c71d40eed2f5d7344b13cdead47 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4429a78c7356469b426154a39bbb7c2b1e7376410c8f2ef714d788ca4932fb8 +size 635 diff --git a/data/2025/2504_13xxx/2504.13180/images/7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg b/data/2025/2504_13xxx/2504.13180/images/7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45409756d7127ef906069c0b85d133405c93a7cd --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:001dbbc72e4275c227d48b3073c183444b6285d753e35090b947da986c19f92e +size 39549 diff --git a/data/2025/2504_13xxx/2504.13180/images/7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg b/data/2025/2504_13xxx/2504.13180/images/7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86ef52423a64f83305d11c2eb669c3151c968e07 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f4435db5a92310f459bd55b17116c5e3b892f8ee9efc4bbe698c635419d4b46 +size 670 diff --git a/data/2025/2504_13xxx/2504.13180/images/806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg b/data/2025/2504_13xxx/2504.13180/images/806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87057659d4439141a949708c8172b01b1a69dacb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be724baa3a8263f67bf8b4417948879e66c35117e02fa9e86c7f0f017d19ac48 +size 9380 diff --git a/data/2025/2504_13xxx/2504.13180/images/828b315a69fb2c08cbb43552a082a1d2df5550d03f5affc2807edeba28365435.jpg b/data/2025/2504_13xxx/2504.13180/images/828b315a69fb2c08cbb43552a082a1d2df5550d03f5affc2807edeba28365435.jpg new file mode 100644 index 0000000000000000000000000000000000000000..193bdeb16667d866e1ddd74d6ffd513dd4b85ed8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/828b315a69fb2c08cbb43552a082a1d2df5550d03f5affc2807edeba28365435.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5b19cc746863f262287453d3239e92c6494ef3e17b1df764e51f5bc85f249286 +size 29987 diff --git a/data/2025/2504_13xxx/2504.13180/images/843cf4224cafa9d4d49787a9e773e2c299b0ce83ddfd981d9e5567f64e76f276.jpg b/data/2025/2504_13xxx/2504.13180/images/843cf4224cafa9d4d49787a9e773e2c299b0ce83ddfd981d9e5567f64e76f276.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56ffc1800bcd45ac7bcc2ccff1f781143ae2b33d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/843cf4224cafa9d4d49787a9e773e2c299b0ce83ddfd981d9e5567f64e76f276.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ddd1fcb125948ef6b7975b31ee11ea336a74e3ec15fa53c1b84acacd0615986 +size 39085 diff --git a/data/2025/2504_13xxx/2504.13180/images/86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg b/data/2025/2504_13xxx/2504.13180/images/86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cff91dc10d23674771b8a162f600eee3ca9e1f53 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0122444e7deb5483ddf0ae534549579bf4e646534b0b7cb4b078823f7d368665 +size 755 diff --git a/data/2025/2504_13xxx/2504.13180/images/87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg b/data/2025/2504_13xxx/2504.13180/images/87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e38bd2fa0efcbf83ae3e78c4c9d148bf9a433b46 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14f26d2ef421a477f8574494424ba3067a43bb25c46a7664b06c2620dde52f75 +size 68062 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/88f1750edc60d7fca24ce4b6116dcb56895c0a14ae4539375ffb52be7846b390.jpg b/data/2025/2504_13xxx/2504.13180/images/88f1750edc60d7fca24ce4b6116dcb56895c0a14ae4539375ffb52be7846b390.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09cfc1b2b0fcec17e90318fa22a5d812a9e2e8ce --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/88f1750edc60d7fca24ce4b6116dcb56895c0a14ae4539375ffb52be7846b390.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee1fec40877966018dcefe35e02a1693c6434c9addf89e41b7e5886d700b666 +size 39514 diff --git a/data/2025/2504_13xxx/2504.13180/images/89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg b/data/2025/2504_13xxx/2504.13180/images/89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b14f9c04d81a5e9cb5678227fc73f5540dfceaad --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cf016bac1a770b1139819728d66d57be4b1c7b4b37089cd05bdc21e497cfd2e +size 667 diff --git a/data/2025/2504_13xxx/2504.13180/images/8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg b/data/2025/2504_13xxx/2504.13180/images/8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcd189431e8ba3e7456f7c1ebdac17731104f499 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1de52a02bc5a57fa7d677da9e42b9f3852dd1c2dd4f1e1577b9df3a8008853b2 +size 9004 diff --git a/data/2025/2504_13xxx/2504.13180/images/8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg 
b/data/2025/2504_13xxx/2504.13180/images/8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg new file mode 100644 index 0000000000000000000000000000000000000000..581660b0b9b19de8099646fe6b8cd339950efb53 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e55ce26651e975c31e33d82a9e709fe023d79c5c51a1cc718be0bb62b6ad2935 +size 13643 diff --git a/data/2025/2504_13xxx/2504.13180/images/91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg b/data/2025/2504_13xxx/2504.13180/images/91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fac40f6670e8872beb26c28aaafc3df2719ca5eb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60a0cd60d639b9bcd3a8d2d053825e37aef0a2d1ad7bd7b096c74f95299a3af3 +size 643 diff --git a/data/2025/2504_13xxx/2504.13180/images/9709cd79ff83abbab0f0a275a7f68a66ecc1857a6d9ed49ff9b58854b7d5d41e.jpg b/data/2025/2504_13xxx/2504.13180/images/9709cd79ff83abbab0f0a275a7f68a66ecc1857a6d9ed49ff9b58854b7d5d41e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51610ee93dd86f7d9ae3c17aac64cc568c54921a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/9709cd79ff83abbab0f0a275a7f68a66ecc1857a6d9ed49ff9b58854b7d5d41e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c49e4ff604cd270c2cace2b3709c2d859b5b059c0da84fc3e6897255ec60751b +size 37827 diff --git a/data/2025/2504_13xxx/2504.13180/images/97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg b/data/2025/2504_13xxx/2504.13180/images/97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..86ef52423a64f83305d11c2eb669c3151c968e07 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f4435db5a92310f459bd55b17116c5e3b892f8ee9efc4bbe698c635419d4b46 +size 670 diff --git a/data/2025/2504_13xxx/2504.13180/images/97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg b/data/2025/2504_13xxx/2504.13180/images/97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c21d1e33c5aa4e80ff20de88272a10acf09ee903 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43d9da90942a937e9dae505cec449c28c31307d9e477ef52638aeeb322a2248e +size 746 diff --git a/data/2025/2504_13xxx/2504.13180/images/9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg b/data/2025/2504_13xxx/2504.13180/images/9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86ef52423a64f83305d11c2eb669c3151c968e07 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f4435db5a92310f459bd55b17116c5e3b892f8ee9efc4bbe698c635419d4b46 +size 670 diff --git a/data/2025/2504_13xxx/2504.13180/images/9a4eb11215c651515e62e6429fc7934bc585298496e5b030e5de5e0d8c25b3b3.jpg b/data/2025/2504_13xxx/2504.13180/images/9a4eb11215c651515e62e6429fc7934bc585298496e5b030e5de5e0d8c25b3b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04dcc7f73b1941bb1cecef35adf8ea7ff9206ce2 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/9a4eb11215c651515e62e6429fc7934bc585298496e5b030e5de5e0d8c25b3b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:331be06957087bfa42376333bc93907405a0912a1d3ed7fcb64dea9c7d65a364 +size 21379 diff --git a/data/2025/2504_13xxx/2504.13180/images/9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg b/data/2025/2504_13xxx/2504.13180/images/9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0e2561388e97262d9a35455e4032181f2989d45 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2caced45a88acba5215c08d32f2e3d404dbe8c4073f30d9127e4b9ca46f92b06 +size 23889 diff --git a/data/2025/2504_13xxx/2504.13180/images/9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg b/data/2025/2504_13xxx/2504.13180/images/9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8cf43c59fa81c42116bc342f538a34e458d07548 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a871dbe66e410a7c98bce47aec0d1d4c1bff6d5389a39815f1bf84557e9277c4 +size 19575 diff --git a/data/2025/2504_13xxx/2504.13180/images/a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg b/data/2025/2504_13xxx/2504.13180/images/a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70dc34d43efbb03a6e91378437948191910b665e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:096b13c83f2ba018b1e0615a672122ca04a30d52adaab08221d9eb9256e06ac4 +size 9070 diff --git a/data/2025/2504_13xxx/2504.13180/images/a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg b/data/2025/2504_13xxx/2504.13180/images/a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19431c24c94da93365e856229454944c6d6bcc2b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46447a70d0e08b9bf619615df9c31f344f1a662baeaebd066e76e382fc0ff650 +size 9235 diff --git a/data/2025/2504_13xxx/2504.13180/images/a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg b/data/2025/2504_13xxx/2504.13180/images/a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53163bf750ba993d90b64e95f0395b8f7b4e35b0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9513edb5982ce706a2e2909a0649698005ba0e7e3d280fb587ef8cd81e8f9ab +size 9569 diff --git a/data/2025/2504_13xxx/2504.13180/images/aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg b/data/2025/2504_13xxx/2504.13180/images/aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15c901af285b3ec86ef3034a03002a150163d03b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aac4c7e0ce184bd7d7bae08a0a76466f0bb0da78d2867393dd6dc372d4efe44 +size 29982 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/ab9bd49c00e0ac2c61fb94363fcc5cec51c4b1cbfe090cb6a415f58a3eb577ea.jpg b/data/2025/2504_13xxx/2504.13180/images/ab9bd49c00e0ac2c61fb94363fcc5cec51c4b1cbfe090cb6a415f58a3eb577ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87a12e1c85c8078ec8ac5312ae5da4080b893252 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ab9bd49c00e0ac2c61fb94363fcc5cec51c4b1cbfe090cb6a415f58a3eb577ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d25a6a349f5e11df4b1c545a36e3c37d6d92e6825a821e9df2ff04863b333e46 +size 183346 diff --git a/data/2025/2504_13xxx/2504.13180/images/ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg b/data/2025/2504_13xxx/2504.13180/images/ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cee9a055045d8e2ba936c8f14fea18ac2585d0d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac3e3f61d0ec6eedec0e29072ae6d3a53c55290ced31dda7aac80d7502806c98 +size 9860 diff --git a/data/2025/2504_13xxx/2504.13180/images/ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg b/data/2025/2504_13xxx/2504.13180/images/ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b6e7891f22b03b7ed819da48cd8d162d755f519 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8a0389abcbfaedc4024e4f979c59ec55d855c27525e70616c82f788b8f7a3d0 +size 635 diff --git a/data/2025/2504_13xxx/2504.13180/images/b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg 
b/data/2025/2504_13xxx/2504.13180/images/b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d42cad2ddeec19fa8b3c4b80048c4b9b38f11f0d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fe948d26c4df40712aa1340270072abe2db0b0689cfb9aee33bbd959ab58586 +size 652 diff --git a/data/2025/2504_13xxx/2504.13180/images/b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg b/data/2025/2504_13xxx/2504.13180/images/b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb6b818e96a8650cc1ad49b27a6ef6492edbf09a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e2427659a550ea4d7d33efaf8acf1043b06cfa92a56fdd27f06e977be953632 +size 753 diff --git a/data/2025/2504_13xxx/2504.13180/images/b83cdccde56ac234bb0f2e3c8ad905aa0dd7ba0225a99cc30d9278d0de75d545.jpg b/data/2025/2504_13xxx/2504.13180/images/b83cdccde56ac234bb0f2e3c8ad905aa0dd7ba0225a99cc30d9278d0de75d545.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe3c2a145e2e28023a3c9b400ea1ce9cbaab5f7d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/b83cdccde56ac234bb0f2e3c8ad905aa0dd7ba0225a99cc30d9278d0de75d545.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbbd8e59ef6f627769d0b44e14d47b1879cc5655b08512f7d7ff8e25b03e5ad0 +size 48744 diff --git a/data/2025/2504_13xxx/2504.13180/images/bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg b/data/2025/2504_13xxx/2504.13180/images/bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..de77737baec3e5ed1bac4793ee0bf1f5afaf9a7a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e294b1052ce70ec0970949f1bd8f581055f0930b439d88da91a74974f59f66bb +size 759 diff --git a/data/2025/2504_13xxx/2504.13180/images/c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg b/data/2025/2504_13xxx/2504.13180/images/c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84eed9e9e7f526b700e20ef0738a81714568a1ee --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b81cdd0343fb80bdb3648f0ef13378ecbf5946bcb2acb42ffcdedf570b0a4ce8 +size 9699 diff --git a/data/2025/2504_13xxx/2504.13180/images/cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg b/data/2025/2504_13xxx/2504.13180/images/cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44373cb2ddb7ad630f216ad47de10722879b973a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c36ba51509720ff2529054513d06070678eea0eae564ca81c5ea62c663045e9b +size 14752 diff --git a/data/2025/2504_13xxx/2504.13180/images/cd1b284ae8d1f3f30f8088b18b59622d25aa293d59cf4db1f85924f860087e5e.jpg b/data/2025/2504_13xxx/2504.13180/images/cd1b284ae8d1f3f30f8088b18b59622d25aa293d59cf4db1f85924f860087e5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..872a3b5c2891046acd45efb2aad9f97bc985093e --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/cd1b284ae8d1f3f30f8088b18b59622d25aa293d59cf4db1f85924f860087e5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d603ff3e260c363755a01efddf99db876fa1a85c0d55bd69be8fd61d156e1255 +size 47635 diff --git a/data/2025/2504_13xxx/2504.13180/images/ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg b/data/2025/2504_13xxx/2504.13180/images/ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d33eafab9b8b48eae532b5d8aff59092683571d9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adced16501d046c001afd59218e4740787e594bac2d252a1650aa8f26959cd38 +size 41048 diff --git a/data/2025/2504_13xxx/2504.13180/images/d030c186456dc1b4dfd47039cf3c8be6b9cf516ecc55f24b4303978981f96e51.jpg b/data/2025/2504_13xxx/2504.13180/images/d030c186456dc1b4dfd47039cf3c8be6b9cf516ecc55f24b4303978981f96e51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ab898691b26beeb4d5e55ecbbb8f57e3d79da77 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/d030c186456dc1b4dfd47039cf3c8be6b9cf516ecc55f24b4303978981f96e51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6b6b078c69f0bb26739dd27655f0d2f2787f16a65b17ee46d13ad5e816a0d6f +size 21903 diff --git a/data/2025/2504_13xxx/2504.13180/images/d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg b/data/2025/2504_13xxx/2504.13180/images/d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fa0512df2947ec13c6f09e1fc08e9bdddd3c0e7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bc4b554368a6313e6f5279237b2a37b2cce4fddfdb1ba02f216d0c058c505be0 +size 670 diff --git a/data/2025/2504_13xxx/2504.13180/images/dce981b90649f3333bcbeecafe451dd31d77240bd5b444c91b0d0b02ac2b5874.jpg b/data/2025/2504_13xxx/2504.13180/images/dce981b90649f3333bcbeecafe451dd31d77240bd5b444c91b0d0b02ac2b5874.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c74e2ffd5b6831988217c6d8e79430cb123ce8b3 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/dce981b90649f3333bcbeecafe451dd31d77240bd5b444c91b0d0b02ac2b5874.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6257931adb892afe82710390cedb073b761dd355b63fdce92104408dac62c9f3 +size 62933 diff --git a/data/2025/2504_13xxx/2504.13180/images/deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg b/data/2025/2504_13xxx/2504.13180/images/deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fcd909e50422f1b25d9eb4cf9e7ac41982c9f87 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b0064d68726c6d56fb7180d08648d097a3132d65dad655ee4cbaa9f9047db0e +size 24846 diff --git a/data/2025/2504_13xxx/2504.13180/images/e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg b/data/2025/2504_13xxx/2504.13180/images/e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f52890da9d548ecfdea38d7ba15ec6d8ec0248ac --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4240a85fc1eef22bfa36aaecd2a166a8e3ca5b03027d02c141663f994c9aad59 +size 18060 diff --git 
a/data/2025/2504_13xxx/2504.13180/images/e4479baec3e3978f3bfd89cfb5cc2991b1e8a1f5648dde908b20e0c4df1874d0.jpg b/data/2025/2504_13xxx/2504.13180/images/e4479baec3e3978f3bfd89cfb5cc2991b1e8a1f5648dde908b20e0c4df1874d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a175820fa031fbea75533a0fade2a37664c024b0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/e4479baec3e3978f3bfd89cfb5cc2991b1e8a1f5648dde908b20e0c4df1874d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57c5a7c8a9000d8cfe66d38ed001162a8afbfc46d061651f3bca60c5f474a015 +size 33661 diff --git a/data/2025/2504_13xxx/2504.13180/images/e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg b/data/2025/2504_13xxx/2504.13180/images/e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c4154fcfa5af0709e55153323ed472a15dd8f78 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f8bbf2846dbf2c6428ab633365986db783549c994b616aadcec5b774f837ba2 +size 20486 diff --git a/data/2025/2504_13xxx/2504.13180/images/ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg b/data/2025/2504_13xxx/2504.13180/images/ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6172c3f87c0ddc10565bd2ff1ada716a3b3f624 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8385d9050c18880e7081322d8c1a7e2b487398b2d7a22b4a2a6ec334243dd54 +size 10338 diff --git a/data/2025/2504_13xxx/2504.13180/images/eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg 
b/data/2025/2504_13xxx/2504.13180/images/eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32953fbf13e456a3e33ac5a208d1b8bb6206ac1f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e6986c8534beceb3396feebc61ad53445a10ba12926a63f934338e62a91c5d +size 4180 diff --git a/data/2025/2504_13xxx/2504.13180/images/ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg b/data/2025/2504_13xxx/2504.13180/images/ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a994a100a76178a599b08e1f1e58320771b294d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfce90d3db4fc85e8590e557ff9705c68e3c43f25b023928c76b6c52c359f1d5 +size 16256 diff --git a/data/2025/2504_13xxx/2504.13180/images/edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg b/data/2025/2504_13xxx/2504.13180/images/edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7758b175d39d549bfead7559d9daacadce616ff --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b8554beb61c1e3975a783012edbeb3b89cca9ed925df3d6de10d75eeda1125c +size 128698 diff --git a/data/2025/2504_13xxx/2504.13180/images/efa02aa4bae3e91c46df6512d791277416bd6ded15e1faf03355c41f6db2cee2.jpg b/data/2025/2504_13xxx/2504.13180/images/efa02aa4bae3e91c46df6512d791277416bd6ded15e1faf03355c41f6db2cee2.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..462c0a0853ae8e79e6442056c7a9b17502e511e5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/efa02aa4bae3e91c46df6512d791277416bd6ded15e1faf03355c41f6db2cee2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2de6a5603d4fec09b00b2af6a9a9fbcda37a730d81c14b339369d2d8bf4a2bfc +size 49870 diff --git a/data/2025/2504_13xxx/2504.13180/images/f35c016082e347045eb81156cdaf9761cb71b628a67cb8a1fc750df128649a34.jpg b/data/2025/2504_13xxx/2504.13180/images/f35c016082e347045eb81156cdaf9761cb71b628a67cb8a1fc750df128649a34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20a92290c517177cc75f4308207d7574635a9240 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/f35c016082e347045eb81156cdaf9761cb71b628a67cb8a1fc750df128649a34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37a189d4a2d7c62891ef6e50c010cca6425c0af91d6561dfb91bd1cced0ae2c8 +size 65128 diff --git a/data/2025/2504_13xxx/2504.13180/images/f5228af189058077b100ec6c0dc4d98c7c57510f2dd8d1516560b8cd3d8e1e1d.jpg b/data/2025/2504_13xxx/2504.13180/images/f5228af189058077b100ec6c0dc4d98c7c57510f2dd8d1516560b8cd3d8e1e1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b6039a253b73f2ec9c7c7f538114f2f604fe97d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/f5228af189058077b100ec6c0dc4d98c7c57510f2dd8d1516560b8cd3d8e1e1d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2efa0949b9161194ed0df79404f6bdfc49e1efa1abaab0ac3f27dd62bc54d18a +size 47049 diff --git a/data/2025/2504_13xxx/2504.13180/images/f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg b/data/2025/2504_13xxx/2504.13180/images/f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a3b69f89b8f68b1fa6319641c545276e2fb0bfe --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13180/images/f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc7fec77e5b684304350f82437df4dcf6c9089612762757e1fa56953e9ceb0f6 +size 14788 diff --git a/data/2025/2504_13xxx/2504.13180/images/fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg b/data/2025/2504_13xxx/2504.13180/images/fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e3f523c2d5ee66d6ae50551f486b885ff2ff753 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:674e9fcd62138a516d6d8f55a6db080ca5c66d37a2ff3f66df26e2bb456b1aea +size 8312 diff --git a/data/2025/2504_13xxx/2504.13180/images/ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg b/data/2025/2504_13xxx/2504.13180/images/ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c89381dd512d03f442196373e2bf349b9bdc67bc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/images/ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e11113a8588ba4d62dfc82d888b68cc577c0ecc81ffae702ce50dad822dc31e +size 743 diff --git a/data/2025/2504_13xxx/2504.13180/layout.json b/data/2025/2504_13xxx/2504.13180/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4899ab8a22a28c47dbd29676c1fba4a740ec4f5c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13180/layout.json @@ -0,0 +1,32647 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 121, + 97, + 491, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 97, + 491, + 138 + ], + "spans": [ + { + "bbox": [ 
+ 121, + 97, + 491, + 138 + ], + "type": "text", + "content": "PerceptionLM: Open-Access Data and Models for Detailed Visual Understanding" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "spans": [ + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": "Jang Hyun Cho" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,2,\\ast,\\dagger}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Andrea Madotto" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Effrosyni Mavroudi" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Triantafyllos Afouras" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Tushar Nagarajan" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Muhammad Maaz" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{3,\\ast,\\dagger}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Yale Song" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Tengyu Ma" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": 
"inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Shuming Hu" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Suyog Jain" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Miguel Martin" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Huiyu Wang" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Hanoona Rasheed" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{3,\\dagger}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Peize Sun" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Po-Yao Huang" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Daniel Bolya" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Nikhila Ravi" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Shashank Jain" + }, + { + 
"bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Tammy Stark" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Shane Moon" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Babak Damavandi" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Vivian Lee" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Andrew Westbury" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Salman Khan" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Philipp Krähenbuhl" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Piotr Dólar" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Lorenzo Torresani" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\star}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": 
"text", + "content": ", Kristen Grauman" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,2,\\star}" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "text", + "content": ", Christoph Feichtenhofer" + }, + { + "bbox": [ + 112, + 163, + 511, + 247 + ], + "type": "inline_equation", + "content": "^{1,\\star}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "spans": [ + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "text", + "content": "Meta FAIR " + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "text", + "content": "UT Austin " + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "text", + "content": "MBZUAI " + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 113, + 251, + 370, + 265 + ], + "type": "text", + "content": "Meta Reality Labs" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 269, + 419, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 269, + 419, + 283 + ], + "spans": [ + { + "bbox": [ + 113, + 269, + 419, + 283 + ], + "type": "text", + "content": "*Joint first author †Work done during internships at Meta *Project lead" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 308, + 329, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 308, + 329, + 321 + ], + "spans": [ + { + "bbox": [ + 281, + 308, + 329, + 321 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + 
"index": 5 + }, + { + "bbox": [ + 140, + 333, + 470, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 333, + 470, + 510 + ], + "spans": [ + { + "bbox": [ + 140, + 333, + 470, + 510 + ], + "type": "text", + "content": "Vision-language models are integral to computer vision research, yet many high-performing models remain closed-source, obscuring their data, design and training recipe. The research community has responded by using distillation from black-box models to label training data, achieving strong benchmark results, at the cost of measurable scientific progress. However, without knowing the details of the teacher model and its data sources, scientific progress remains difficult to measure. In this paper, we study building a Perception Language Model (PLM) in a fully open and reproducible framework for transparent research in image and video understanding. We analyze standard training pipelines without distillation from proprietary models and explore large-scale synthetic data to identify critical data gaps, particularly in detailed video understanding. To bridge these gaps, we release 2.8M human-labeled instances of fine-grained video question-answer pairs and spatio-temporally grounded video captions. Additionally, we introduce PLM-VideoBench, a suite for evaluating challenging video understanding tasks focusing on the ability to reason about \"what\", \"where\", \"when\", and \"how\" of a video. We make our work fully reproducible by providing data, training recipes, code & models." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 513, + 408, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 513, + 408, + 525 + ], + "spans": [ + { + "bbox": [ + 141, + 513, + 408, + 525 + ], + "type": "text", + "content": "GitHub: https://github.com/facebookresearch/perception_models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "type": "text", + "content": "Vision-language models (VLMs) are now a key part of computer vision research and are widely used in both academia and industry. Many of the strongest performing VLMs are closed-source, meaning their design, training methods, and the data they use are not publicly shared. To stay competitive, the research community has started to catch up to the proprietary models by using a straightforward approach — distillation from black-box models [1, 2, 3, 4, 5], where proprietary models are directly used to label training data [3, 6, 7], directly leading to strong benchmark results." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 635, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 506, + 715 + ], + "type": "text", + "content": "Although distillation will unlock strong performance, there are two main issues for basic research. First, it makes it hard to track scientific progress. 
Specifically, we cannot tell if better results on benchmarks are due to advances in model design or training, or simply because the proprietary teacher models were trained on the evaluation sets of widely used benchmarks or internal data collected to resemble them — this information is not available. Second, the heavy reliance on distillation leads to a fundamental misunderstanding of the effectiveness of current methods for training VLMs from scratch. Several key questions remain unanswered, including the significance of each training stage," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "type": "text", + "content": "arXiv:2504.13180v3 [cs.CV] 23 Jul 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 152, + 743 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 152, + 743 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 152, + 743 + ], + "type": "text", + "content": "Meta" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 70, + 504, + 154 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 504, + 154 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 504, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 504, + 154 + ], + "type": "image", + "image_path": "47547bc1f66e8b472335e2743482081a23778369dfd135fb268632f3e2c4efde.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 160, + 504, + 191 + ], + "lines": [ + { + "bbox": [ + 104, + 160, + 504, + 191 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 504, + 191 + ], + "type": "text", + "content": "Figure 1: We introduce the largest collection of manually annotated fine-grained 
activity QA and spatiotemporal captioning data (left panel). Together with this data, we train and release PLM —open and fully reproducible models to facilitate research in vision-language model training (right panel)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 206, + 506, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 206, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 506, + 229 + ], + "type": "text", + "content": "the influence of synthetic data, the data gaps that the research community should prioritize, and which of these gaps are currently being artificially addressed by distillation from proprietary models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 233, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 233, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 504, + 310 + ], + "type": "text", + "content": "To better understand these challenges, we develop the Perception Language Model (PLM), a fully open and reproducible model for transparent research in image and video understanding (Fig. 1 right). PLM consists of a vision encoder with a small scale (<8B parameters) LLM decoder. We start by an analysis of standard training pipelines with available data, without any proprietary model distillation. We investigate large-scale synthetic data and establish key scaling laws to identify critical data gaps that limit video understanding performance, especially for spatio-temporal reasoning and fine-grained understanding tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 316, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 392 + ], + "type": "text", + "content": "To fill these gaps, we create 2.8M high-quality human-labeled instances of fine-grained video QA and spatio-temporally grounded video captions, see Fig. 1. This release is nearly an order of magnitude larger than the largest existing video datasets of each type [8, 9]. Our model, dataset and benchmark push the boundaries of video understanding, and provide a foundation for reproducible and transparent training and evaluation of VLM research. Across 40 image and video benchmarks, we achieve comparable performance with existing state-of-the-art open-weight models (e.g., InternVL2.5 [10]), without distilling from proprietary models, and greatly outperform fully open models (i.e., Molmo [11])." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 411, + 197, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 197, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 197, + 423 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 438, + 506, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 506, + 504 + ], + "type": "text", + "content": "Vision-Language Models. Building on the strengths of large language models (LLMs), several vision-language models (VLMs) have recently been proposed for image understanding [1, 12, 13, 14, 15, 16, 17, 18, 19], video understanding [20, 21, 22, 23, 24, 25, 26, 27] and joint understanding of both images and videos [10, 28, 29, 30]. 
These works employ several modeling advancements such as dynamic high resolution inputs [12], adaptive token compression [25, 31], and multimodal positional embeddings [30]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 520, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 586 + ], + "type": "text", + "content": "Open source, open data VLMs. Training data is a key component in developing powerful VLMs. Many existing approaches train on proprietary data that is not released to the community [32, 33, 34, 35, 36] or on data generated using proprietary models (e.g., GPT4o) [3], effectively distilling the closed models. Doing so make measuring scientific progress difficult and limits research on how to train VLMs ground-up. Molmo [11] proposes a class of open-data models, however, they are image VLMs trained on relatively small-scale data, limiting their performance as our experiments will show." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 601, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 721 + ], + "type": "text", + "content": "VLM Benchmarks. Several benchmarks have been proposed to assess the capabilities of VLMs. Popular image benchmarks cover broad perception and reasoning [37, 38, 39, 40, 41, 42, 43, 44, 19, 45, 46, 47, 48] as well as capabilities like image captioning [49, 50, 51], document/diagram understanding [52, 53, 54, 55, 56, 57, 58, 59, 60, 61], mathematical reasoning [62, 63, 64], visual grounding [65, 66] and hallucination [67, 68]. Popular video benchmarks cover video question answering [20, 8, 69, 70, 71, 72, 73, 74, 75, 76, 77, 22, 78, 79, 80], video captioning [81, 82, 83, 84, 85, 86, 87], and hallucination in videos [88, 89]. 
Many of these video benchmarks remain image-centric — they have questions that can be answered with a few frames. Video-centric reasoning in benchmarks has been relatively neglected with benchmarks proposed only recently for long video understanding [90, 91, 92, 93, 94, 95, 96, 97, 98] and fine-grained, temporal reasoning [99, 100, 101, 102, 103]. We introduce PLM-VideoBench—a benchmark suite aimed at the core, video" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "centric capabilities that current benchmarks neglect, namely fine-grained activity understanding and spatio-temporally grounded reasoning." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 117, + 210, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 117, + 210, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 210, + 131 + ], + "type": "text", + "content": "3 PLM: Overview" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 146, + 504, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 146, + 504, + 169 + ], + "spans": [ + { + "bbox": [ + 104, + 146, + 504, + 169 + ], + "type": "text", + "content": "In this section, we overview the model, training stages and training data involved in the development of PLM. Please refer to Fig. 8 for a detailed overview and Appendix A for additional details." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "text", + "content": "Model. PLM consists of a vision encoder and language decoder, where a pre-trained Perception Encoder (PE) [104] is connected to the Llama 3 [13] language decoder (1B, 3B, or 8B parameters) with a 2-layer MLP projector. We use PE L/14 for Llama3.2 1B and 3B, and PE G/14 for Llama3.1 8B. For image input, PLM incorporates dynamic tiling to support high resolution images for up to 36 tiles of " + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "inline_equation", + "content": "448^{2}" + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "text", + "content": " resolution, where each tile undergoes " + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "text", + "content": " average input, PLM uses 32 frames at " + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "inline_equation", + "content": "448^{2}" + }, + { + "bbox": [ + 104, + 186, + 298, + 319 + ], + "type": "text", + "content": " resolution, v dimensions of each video frame." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 305, + 189, + 503, + 261 + ], + "blocks": [ + { + "bbox": [ + 305, + 189, + 503, + 261 + ], + "lines": [ + { + "bbox": [ + 305, + 189, + 503, + 261 + ], + "spans": [ + { + "bbox": [ + 305, + 189, + 503, + 261 + ], + "type": "table", + "html": "
Stage 1 WarmupStage 2 MidtrainingStage 3 SFT
ModalityImageImage + VideoImage + Video
Data1M Synthetic72M Mix19M Mix
TrainingProjectileFullFull
Downsampling-2 × 22 × 2
Tiles/Frames1/-16/1636/32
", + "image_path": "d030c186456dc1b4dfd47039cf3c8be6b9cf516ecc55f24b4303978981f96e51.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 303, + 266, + 505, + 287 + ], + "lines": [ + { + "bbox": [ + 303, + 266, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 303, + 266, + 505, + 287 + ], + "type": "text", + "content": "Table 1: Summary of three training stages to train PLM. See Appendix Table 7 and Table 8 for data splits." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 303, + 286, + 504, + 318 + ], + "lines": [ + { + "bbox": [ + 303, + 286, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 303, + 286, + 504, + 318 + ], + "type": "text", + "content": "pooling to compress the visual tokens. For video where the same pooling is applied across the spatial" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 337, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 506, + 392 + ], + "type": "text", + "content": "Data. The data used to train the PLM consists of synthetic and human-annotated samples. Synthetic data enhances the general capabilities of PLM, while human-annotated data broadens these capabilities to encompass more complex tasks. Synthetic data is sourced from a diverse array of image and video datasets, covering fundamental VLM capabilities such as OCR, chart/document/diagram understanding, image/video captioning, and visual question answering." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "text", + "content": "We design data engines for each data modality (e.g., natural images, charts, documents, figures, egocentric and exocentric videos) to efficiently scale up, creating " + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "inline_equation", + "content": "\\sim 66.1\\mathrm{M}" + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "text", + "content": " samples (" + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "inline_equation", + "content": "\\S 4" + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "text", + "content": "). The synthetic data can be noisy, but is available at large scale; on the other hand, human-annotated data provides rich, high-quality supervision for image and video tasks. Here, we combine existing human annotations of diverse image and video sources, with our own collected human-annotated data, specifically geared towards fine-grained video understanding and spatio-temporally grounded reasoning (" + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "inline_equation", + "content": "\\S 5" + }, + { + "bbox": [ + 104, + 397, + 505, + 464 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 481, + 293, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 481, + 293, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 293, + 494 + ], + "type": "text", + "content": "Training stages. 
PLM trains in three stages:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 498, + 504, + 695 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 104, + 498, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 504, + 543 + ], + "type": "text", + "content": "1. **Projector warm-up.** First, we freeze the vision encoder and LLM and only train the vision projector on a small amount of synthetic image data. This warms-up the newly initialized parameters in the projector and improves stability for later stages. We use " + }, + { + "bbox": [ + 104, + 498, + 504, + 543 + ], + "type": "inline_equation", + "content": "1M" + }, + { + "bbox": [ + 104, + 498, + 504, + 543 + ], + "type": "text", + "content": " images from SA-1B [105] with the image captions generated by our data engine (§4)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 547, + 337, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 547, + 337, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 547, + 337, + 624 + ], + "type": "text", + "content": "2. Large-scale midtraining with synthetic data. Next, we train PLM on diverse domains of images and videos at scale, using a maximum of 16 tiles for images and 16 frames for videos. PLM sees around 64.7M images and videos with synthetically generated captions and question-answer pairs. We employ our data engine to scale up synthetic data generation (see §4)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 629, + 337, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 337, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 337, + 695 + ], + "type": "text", + "content": "3. Supervised fine-tuning with human-annotated data. 
Finally, we train PLM with higher image resolutions and more video frames, using up to 36 tiles for images and 32 frames for videos. In this stage, we tackle more challenging video tasks, including fine-grained QA and spatiotemporally grounded reasoning." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 345, + 549, + 503, + 661 + ], + "blocks": [ + { + "bbox": [ + 345, + 549, + 503, + 661 + ], + "lines": [ + { + "bbox": [ + 345, + 549, + 503, + 661 + ], + "spans": [ + { + "bbox": [ + 345, + 549, + 503, + 661 + ], + "type": "table", + "html": "
SamplesTypeStage
Our Human-annotated (2.87M)
PLM-FGQA2.4MFine-grained3
PLM-STC476.2KR(D)Cap + RTL3
Our Synthetic (66.1M)
Natural Images15.9MCaption1,2,3
Charts & Documents31.9MCaption2,3
Videos Mix17.5MMix.2,3
Ego4D880KCap. + QA2,3
Existing Open Source (6.52M)
Image (92 datasets)5.6MDiverse2,3
Video (27 datasets)920KDiverse2,3
", + "image_path": "e4479baec3e3978f3bfd89cfb5cc2991b1e8a1f5648dde908b20e0c4df1874d0.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 342, + 666, + 505, + 687 + ], + "lines": [ + { + "bbox": [ + 342, + 666, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 342, + 666, + 505, + 687 + ], + "type": "text", + "content": "Table 2: Summary of the data mix for training PLM. See Table 9 for the full data blend." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "Table 1 shows an overview of our training setup for each stage. Appendix A.1 provides the complete training recipe for each stage, including hyperparameters and data sources." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 326, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 326, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 326, + 85 + ], + "type": "text", + "content": "4 Synthetic Data Generation and Scaling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "type": "text", + "content": "The predominant paradigm for VLM training is to generate 
synthetic annotations as cheap alternatives to human-labeled data [1, 106, 30, 107, 10, 11, 15]. Although seemingly promising to get the best results on benchmarks, the majority of such data shared in the community is derived from proprietary models. This trend makes it hard to decouple scientific progress from proprietary distillation impact. In this section, we explore the efficacy of the current paradigm for VLM training in a transparent manner. We design our data engine entirely from open-source models and scale the synthetic data generation to around 66.1M samples of images and videos. We establish the scaling laws of training from synthetic data on standard VLM tasks, including image, OCR/document, and video tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 196, + 185, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 196, + 185, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 185, + 208 + ], + "type": "text", + "content": "4.1 Data Engine" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 216, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 506, + 228 + ], + "type": "text", + "content": "Our data engine is designed to target base capabilities of VLMs for image and video understanding." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": "Image Data Engine. We generate short and long captions, as well as question-answer pairs, for natural images and those containing documents, diagrams, and text recognizable by optical character recognition (OCR). We prompt openly accessible Llama 3 [13] model to produce factual, detailed image captions while minimizing hallucinations. 
To create informative question-answer pairs, we utilize OCR data, captions, and other metadata, which are fed into the prompt of a text-only LLM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 305, + 504, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 305, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 305, + 504, + 372 + ], + "type": "text", + "content": "Video Data Engine. For videos, we first use an off-the-shelf scene detector [108] to extract video clips of approximately 30 seconds duration. Then, we extract the keyframes and generate frame-level captions using Llama 3, and video captions using our initial PLM trained with Stage 1 and Stage 3 data as shown in Table 2. We then employ an LLM to refine the frame-level and video captions by incorporating existing video metadata (e.g., action labels, time tags) into a cohesive, detailed video-level caption. Similarly, we generate question-answer pairs from the video-level captions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 376, + 506, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 410 + ], + "type": "text", + "content": "The resulting synthetic data is large-scale and diverse – 66.1M samples carefully curated from a variety of image and video sources including natural images, in-the-wild text, chart, figures, documents, egocentric and exocentric videos. Additional details are in Appendix J." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 422, + 274, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 274, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 274, + 434 + ], + "type": "text", + "content": "4.2 Scaling Laws with Synthetic Data" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 442, + 506, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 506, + 455 + ], + "type": "text", + "content": "We examine scaling properties of our synthetic data under controlled setup and establish scaling laws." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 111, + 464, + 501, + 575 + ], + "blocks": [ + { + "bbox": [ + 111, + 464, + 501, + 575 + ], + "lines": [ + { + "bbox": [ + 111, + 464, + 501, + 575 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 501, + 575 + ], + "type": "image", + "image_path": "ce0ddd8b52a979c5ae05cf42d242fd300597fa9681721f1050bf3243c0367b61.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 586, + 506, + 639 + ], + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 639 + ], + "type": "text", + "content": "Figure 2: Synthetic Scaling Plots. Relationship between Average Error across benchmarks and training compute (in floating-point operations) for various PLM models. We report average errors across Video QA tasks [75, 72, 90, 8, 70, 71], OCR QA tasks [109, 53, 56, 57], and Natural Images tasks [45, 110, 111, 68, 40, 112]. Model's performance using only human-labeled data subset are reported (No Syst.) as well as the actual power-law fit of each subcategory." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 645, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 507, + 723 + ], + "type": "text", + "content": "Setup. To establish power-law relationship between compute and validation-set errors of downstream benchmarks, we vary the scale of synthetic data, language model decoders (1B, 3B, and 8B), vision encoders (300M and 2B), and resolution/number of frames. For each configuration, we train a model with the 66.1M synthetic data from our data engine and 6.5M publicly available human-labeled data, following stage 2 training described in §3. At every 2M samples, we evaluate PLM on three categories of downstream benchmarks (VideoQA, OCR QA, Natural QA), constructed from 20 vision-language understanding benchmarks that provide a comprehensive and general evaluation of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "multi-modal large language models. 
We compute the pareto frontier of these data points and fit a power law relationship: " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\mathrm{Err.} = (\\beta \\times \\mathrm{FLOP})^{\\alpha}" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " and compare the exponents " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " of the power function as scalability of each setup, where a smaller " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " implies better scaling." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "text", + "content": "Scaling with decoder size. Fig. 2 shows the scaling behavior of PLM across various LLM sizes. We show validation-set errors and training compute on a logarithmic scale, with the black linear line representing the power-law relationship between them. Different colors (green, turquoise, and blue) represent different language model scales (1B, 3B, 8B) while keeping the vision encoder size constant at 300M. As described in the setup section above, we show the power law fit of the pareto frontier in each benchmark category. We also show the results of PLM only trained on 4M human-labeled datasets as baselines, denoted with horizontal lines of each color. The gap from the horizontal line to the data point marks the impact of the synthetic data. 
Interestingly, all three categories of benchmarks demonstrate clear power-law relationship between compute and average benchmark errors, with the power law exponent " + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "inline_equation", + "content": "(\\alpha)" + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "inline_equation", + "content": "-0.15, -0.20," + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "inline_equation", + "content": "-0.11" + }, + { + "bbox": [ + 104, + 118, + 506, + 251 + ], + "type": "text", + "content": " for Video QA, OCR QA, and Natural Image QA, respectively. In Appendix B, we provide more details and extend the analysis to (1) scaling the encoder size, and (2) scaling the image resolution and video frames." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "text", + "content": "Limitation of synthetic data. In Fig. 3, we evaluate stage 2 on an extended set of video benchmarks. Specifically, we show the result of 7 challenging video tasks on fine-grained activity understanding [97, 100, 89, 101, 99], temporal grounding [113] and long-video reasoning [92]. Unlike generic, high-level understanding (e.g., \"what is happening in this video\"), the \"challenging\" tasks require a thorough understanding of video in space and time, and fine-grained semantic details. 
As shown, the challenging video tasks (\"HardQA\" in lavender, plum, magenta) show a poor scaling trend " + }, + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "inline_equation", + "content": "(-0.03)" + }, + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "text", + "content": " compared to general video QA " + }, + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "inline_equation", + "content": "(-0.15)" + }, + { + "bbox": [ + 104, + 263, + 338, + 417 + ], + "type": "text", + "content": ". The stark difference between the two power law fits shows that scaling synthetic data is only effective for established, base tasks. Extending VLMs to" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 417, + 504, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 504, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 504, + 439 + ], + "type": "text", + "content": "these more challenging, complex tasks still remain unsolved. Next, we address this challenge with high-quality human-annotated video data, PLM-FGQA and PLM-STC." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 345, + 266, + 504, + 370 + ], + "blocks": [ + { + "bbox": [ + 345, + 266, + 504, + 370 + ], + "lines": [ + { + "bbox": [ + 345, + 266, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 345, + 266, + 504, + 370 + ], + "type": "image", + "image_path": "2a83600a57b71970f5457ecdbd63b416783eb5ed14f72d374d98f922554cea39.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 375, + 506, + 416 + ], + "lines": [ + { + "bbox": [ + 342, + 375, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 342, + 375, + 506, + 416 + ], + "type": "text", + "content": "Figure 3: Limitation of synthetic data. Challenging video tasks (HardQA [97, 100, 89, 101, 99, 113, 92]) do not scale well with synthetic data." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 456, + 317, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 317, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 317, + 470 + ], + "type": "text", + "content": "5 Human-annotated High Quality Data" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 481, + 504, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 504, + 570 + ], + "type": "text", + "content": "As shown in Fig. 3, the current paradigm with synthetic data has run out of steam. Training from tens of millions of synthetically annotated data hardly improves our model on new, challenging video benchmarks. Beyond standard VLM tasks, these benchmarks focus on advanced capabilities such as fine-grained activity understanding, temporal grounding, and long video understanding. Perhaps, the knowledge that these benchmarks examine is simply not present in the initial training set of our data engine nor in existing human-annotated data. Our community lacks high quality datasets for detailed visual understanding to start from, that covers what, where, when, and how of activities in video. To address this gap, we introduce two large-scale, human-annotated video datasets:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 574, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 506, + 674 + ], + "type": "text", + "content": "PLM-FGQA is a fine-grained video QA dataset collected by asking human annotators to watch a short video segment and answer model-generated questions which focus on \"what\" activities humans perform and \"how\" they perform these activities. 
Question types include fine-grained recognition (action and object), fine-grained temporal perception (direction of movements, repetition counts, hand pose etc.), and fine-grained spatial understanding (object locations and spatial relationships). We use a multi-stage data engine to first extract video segments with salient actions from untrimmed videos through temporal clustering and shot-detection. Next, we generate questions and answers using either a text-only LLM or an early version of PLM. Finally, we refine the answers by asking humans to verify or replace them if they are incorrect, resulting in a high-quality QA pairs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Overall, we collect 2.4M question answer pairs from various open-access video datasets [114, 115, 116, 117, 118, 83] spanning over 780k unique video clips from diverse domains (e.g., cooking, DIY, carpentry, automotive and bike repair) and viewpoints (egocentric and third-person); refer to Fig. 13 for domain statistics. 
This is nearly 8 times larger than the size of the largest existing human-annotated" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 85, + 236, + 129 + ], + "blocks": [ + { + "bbox": [ + 201, + 73, + 276, + 82 + ], + "lines": [ + { + "bbox": [ + 201, + 73, + 276, + 82 + ], + "spans": [ + { + "bbox": [ + 201, + 73, + 276, + 82 + ], + "type": "text", + "content": "Fine-grained QA (FGQA)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 85, + 236, + 129 + ], + "lines": [ + { + "bbox": [ + 109, + 85, + 236, + 129 + ], + "spans": [ + { + "bbox": [ + 109, + 85, + 236, + 129 + ], + "type": "image", + "image_path": "1a7f2c5e0b01ddbfc1ebb31508d2b53949e7690e93cf520c512940ee99f3d650.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 134, + 138, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 134, + 138, + 140 + ], + "spans": [ + { + "bbox": [ + 113, + 134, + 138, + 140 + ], + "type": "text", + "content": "Question" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 140, + 231, + 161 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 113, + 140, + 208, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 140, + 208, + 147 + ], + "spans": [ + { + "bbox": [ + 113, + 140, + 208, + 147 + ], + "type": "text", + "content": "How does the person hold the sandpaper?" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 114, + 148, + 231, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 148, + 231, + 161 + ], + "spans": [ + { + "bbox": [ + 114, + 148, + 231, + 161 + ], + "type": "text", + "content": "Answer: With their right hand, between the right thumb on one side, fingers on the other side." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 166, + 138, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 166, + 138, + 172 + ], + "spans": [ + { + "bbox": [ + 113, + 166, + 138, + 172 + ], + "type": "text", + "content": "Question" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 172, + 233, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 172, + 233, + 182 + ], + "spans": [ + { + "bbox": [ + 113, + 172, + 233, + 182 + ], + "type": "text", + "content": "In which direction is the person moving the sandpaper? Answer" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 182, + 232, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 182, + 232, + 193 + ], + "spans": [ + { + "bbox": [ + 115, + 182, + 232, + 193 + ], + "type": "text", + "content": "From the bottom of the baluster to the top in a vertical, oscillating motion." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 242, + 86, + 285, + 129 + ], + "blocks": [ + { + "bbox": [ + 242, + 86, + 285, + 129 + ], + "lines": [ + { + "bbox": [ + 242, + 86, + 285, + 129 + ], + "spans": [ + { + "bbox": [ + 242, + 86, + 285, + 129 + ], + "type": "image", + "image_path": "351b84e7e6eab42fbcfc176442e53f21a364c3de841a66578ee280d83a08d964.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 285, + 86, + 326, + 129 + ], + "blocks": [ + { + "bbox": [ + 285, + 86, + 326, + 129 + ], + "lines": [ + { + "bbox": [ + 285, + 86, + 326, + 129 + ], + "spans": [ + { + "bbox": [ + 285, + 86, + 326, + 129 + ], + "type": "image", + "image_path": "eca9333b1282cfb47b5431e79283bb3bfec193130f5dbfeedb73b9e5184df31f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 327, + 86, + 369, + 129 + ], + "blocks": [ + { + "bbox": [ + 327, + 86, + 369, + 129 + ], + "lines": [ + { + "bbox": [ + 327, + 86, + 369, + 129 + ], + "spans": [ + { + "bbox": [ + 327, + 86, + 369, + 129 + ], + "type": "image", + "image_path": "23424f3efaac5d26d36c163a60d6ae225962eedc063b4b408179011888538ac6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 246, + 134, + 270, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 134, + 270, + 140 + ], + "spans": [ + { + "bbox": [ + 246, + 134, + 270, + 140 + ], + "type": "text", + "content": "Question" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 246, + 140, + 361, + 160 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 246, + 140, + 345, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 140, + 345, + 147 + ], + "spans": [ + { + "bbox": [ + 246, + 140, + 345, + 147 + ], + "type": "text", + 
"content": "How many chakli snacks does the person flip?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 246, + 148, + 361, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 148, + 361, + 160 + ], + "spans": [ + { + "bbox": [ + 246, + 148, + 361, + 160 + ], + "type": "text", + "content": "Answer\nThe person flips three chakki snacks with a long metal skewer." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 246, + 166, + 270, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 166, + 270, + 171 + ], + "spans": [ + { + "bbox": [ + 246, + 166, + 270, + 171 + ], + "type": "text", + "content": "Question" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 246, + 172, + 361, + 193 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 246, + 172, + 361, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 172, + 361, + 182 + ], + "spans": [ + { + "bbox": [ + 246, + 172, + 361, + 182 + ], + "type": "text", + "content": "Where is the metal skewer located at the beginning? Answer" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 246, + 182, + 360, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 182, + 360, + 193 + ], + "spans": [ + { + "bbox": [ + 246, + 182, + 360, + 193 + ], + "type": "text", + "content": "Resting on top of the pan, which is positioned on the left burner of the portable stove." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 388, + 86, + 503, + 198 + ], + "blocks": [ + { + "bbox": [ + 388, + 86, + 503, + 198 + ], + "lines": [ + { + "bbox": [ + 388, + 86, + 503, + 198 + ], + "spans": [ + { + "bbox": [ + 388, + 86, + 503, + 198 + ], + "type": "image", + "image_path": "15d1e4348b171804a04b545daf5f6f95190b7b83cd493103d526f75cae05e941.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 205, + 506, + 260 + ], + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 260 + ], + "type": "text", + "content": "Figure 4: Overview PLM-FGQA. Examples of question-answer pairs from PLM-FGQA, focusing on fine-grained human activity understanding. PLM-FGQA is approximately 8 times larger than the largest existing human-annotated video QA dataset and addresses a wide range of fine-grained question types that are scarce in existing video QA datasets, such as ones that cover direction of movement, object states, locations and spatial relations." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 273, + 506, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 506, + 328 + ], + "type": "text", + "content": "video QA dataset in the community [91]. Moreover, as illustrated by the breakdown of question types1 in Fig. 4 (top-right), PLM-FGQA contains a large number of annotations about fine-grained details that have been largely missing in existing training video QA datasets [119, 69, 71, 76, 20, 120, 121, 122, 123]. Please refer to Table 16 for comparison with existing datasets Table 17 for dataset examples and Appendix G for further details." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 333, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 333, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 333, + 504, + 431 + ], + "type": "text", + "content": "PLM-STC is a spatio-temporal video captioning dataset that offers detailed activity descriptions for each video. It includes timestamps (\"when\") of each activity and focuses on specific subjects identified by a masklet (\"where\"). We employ a two-stage annotation process to improve efficiency in collecting PLM-STC. In the first stage, annotators select interesting objects that exhibit significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. For segments where the subject is out of frame, we automatically supplement \"out of frame\" caption. In the second stage, a separate set of annotators write temporally localized descriptions of the highlighted subject focusing on the changes in action across time in relation to the whole video." + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 110, + 456, + 236, + 501 + ], + "blocks": [ + { + "bbox": [ + 110, + 456, + 236, + 501 + ], + "lines": [ + { + "bbox": [ + 110, + 456, + 236, + 501 + ], + "spans": [ + { + "bbox": [ + 110, + 456, + 236, + 501 + ], + "type": "image", + "image_path": "5f618a5368f89a021eb81345e3435f2d49ab45856488999908734b930348342e.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 115, + 511, + 162, + 517 + ], + "lines": [ + { + "bbox": [ + 115, + 511, + 162, + 517 + ], + "spans": [ + { + "bbox": [ + 115, + 511, + 162, + 517 + ], + "type": "text", + "content": "[0,11] Out of frame." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 533, + 233, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 533, + 233, + 552 + ], + "spans": [ + { + "bbox": [ + 114, + 533, + 233, + 552 + ], + "type": "text", + "content": "[12, 67] The person wearing a jacket is running on a snow covered ground. She stops and turns to look the other person." + } + ] + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 243, + 456, + 369, + 500 + ], + "blocks": [ + { + "bbox": [ + 257, + 445, + 353, + 453 + ], + "lines": [ + { + "bbox": [ + 257, + 445, + 353, + 453 + ], + "spans": [ + { + "bbox": [ + 257, + 445, + 353, + 453 + ], + "type": "text", + "content": "Spatio-temporal Captions (STC)" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 243, + 456, + 369, + 500 + ], + "lines": [ + { + "bbox": [ + 243, + 456, + 369, + 500 + ], + "spans": [ + { + "bbox": [ + 243, + 456, + 369, + 500 + ], + "type": "image", + "image_path": "3e89be3574960690c64f0b7055c09f35fd1083243414bd350b97a1cb23b8a777.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "bbox": [ + 246, + 511, + 365, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 511, + 365, + 529 + ], + "spans": [ + { + "bbox": [ + 246, + 511, + 365, + 529 + ], + "type": "text", + "content": "[0, 19] The man moves gracefully, using his hand gestures that closely resemble a dance in most of his actions." 
+ } + ] + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 320, + 532, + 332, + 537 + ], + "blocks": [ + { + "bbox": [ + 320, + 532, + 332, + 537 + ], + "lines": [ + { + "bbox": [ + 320, + 532, + 332, + 537 + ], + "spans": [ + { + "bbox": [ + 320, + 532, + 332, + 537 + ], + "type": "image", + "image_path": "3cb34894424b16abcd3d3c152fa48af28bed03b50451ed2a221c53cd3c8b8e04.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 247, + 541, + 354, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 541, + 354, + 548 + ], + "spans": [ + { + "bbox": [ + 247, + 541, + 354, + 548 + ], + "type": "text", + "content": "[20, 31] The person moves from right to left." + } + ] + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 375, + 456, + 501, + 500 + ], + "blocks": [ + { + "bbox": [ + 375, + 456, + 501, + 500 + ], + "lines": [ + { + "bbox": [ + 375, + 456, + 501, + 500 + ], + "spans": [ + { + "bbox": [ + 375, + 456, + 501, + 500 + ], + "type": "image", + "image_path": "6066602844e8b0ad50061ab254751ffb1f532fee4ceadb24ac1ba8e92a6d26a3.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 378, + 503, + 390, + 509 + ], + "blocks": [ + { + "bbox": [ + 378, + 503, + 390, + 509 + ], + "lines": [ + { + "bbox": [ + 378, + 503, + 390, + 509 + ], + "spans": [ + { + "bbox": [ + 378, + 503, + 390, + 509 + ], + "type": "image", + "image_path": "0c0a20a943cefe72f701d79b7f86c02d855115c56f28fa87e454639b1e91c242.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 395, + 503, + 402, + 509 + ], + "blocks": [ + { + "bbox": [ + 395, + 503, + 402, + 509 + ], + "lines": [ + { + "bbox": [ + 395, + 503, + 402, + 509 + ], + "spans": [ + { + "bbox": [ + 395, + 503, + 402, + 509 + ], + "type": "image", + 
"image_path": "1143283bbc17c530501279b590e48166d0db6a1109e811e196e05fc60f1b5b76.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 403, + 503, + 414, + 509 + ], + "blocks": [ + { + "bbox": [ + 403, + 503, + 414, + 509 + ], + "lines": [ + { + "bbox": [ + 403, + 503, + 414, + 509 + ], + "spans": [ + { + "bbox": [ + 403, + 503, + 414, + 509 + ], + "type": "image", + "image_path": "0255fd220e6ac7c13d170a81727d27bea3e506b3d3069ec1c128a222ce73a63f.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 416, + 503, + 422, + 509 + ], + "blocks": [ + { + "bbox": [ + 416, + 503, + 422, + 509 + ], + "lines": [ + { + "bbox": [ + 416, + 503, + 422, + 509 + ], + "spans": [ + { + "bbox": [ + 416, + 503, + 422, + 509 + ], + "type": "image", + "image_path": "7ef9456e3e8bc1c9728a3e962e610a1c30befbc64747dbf0b91c857670438e56.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 424, + 503, + 430, + 509 + ], + "blocks": [ + { + "bbox": [ + 424, + 503, + 430, + 509 + ], + "lines": [ + { + "bbox": [ + 424, + 503, + 430, + 509 + ], + "spans": [ + { + "bbox": [ + 424, + 503, + 430, + 509 + ], + "type": "image", + "image_path": "97cf383d4d374e8b3977caada0df1ed59c91b9b0a1f796452a8293c81320b1d3.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 435, + 503, + 440, + 509 + ], + "blocks": [ + { + "bbox": [ + 435, + 503, + 440, + 509 + ], + "lines": [ + { + "bbox": [ + 435, + 503, + 440, + 509 + ], + "spans": [ + { + "bbox": [ + 435, + 503, + 440, + 509 + ], + "type": "image", + "image_path": "b278c198dd92d6a581093a9fa531b979cd6e4f3bd04c4e44384c4692ee3b879c.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + 
{ + "type": "image", + "bbox": [ + 443, + 503, + 449, + 509 + ], + "blocks": [ + { + "bbox": [ + 443, + 503, + 449, + 509 + ], + "lines": [ + { + "bbox": [ + 443, + 503, + 449, + 509 + ], + "spans": [ + { + "bbox": [ + 443, + 503, + 449, + 509 + ], + "type": "image", + "image_path": "9864828c246fe26b65456150ba8bd91c706c9812d190f4a5efca447c9728f7af.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 452, + 503, + 459, + 509 + ], + "blocks": [ + { + "bbox": [ + 452, + 503, + 459, + 509 + ], + "lines": [ + { + "bbox": [ + 452, + 503, + 459, + 509 + ], + "spans": [ + { + "bbox": [ + 452, + 503, + 459, + 509 + ], + "type": "image", + "image_path": "d41eaa8e359ef64abc050548e3649c0aeeb3674038d0e26555c3356e5303b499.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 468, + 503, + 474, + 509 + ], + "blocks": [ + { + "bbox": [ + 468, + 503, + 474, + 509 + ], + "lines": [ + { + "bbox": [ + 468, + 503, + 474, + 509 + ], + "spans": [ + { + "bbox": [ + 468, + 503, + 474, + 509 + ], + "type": "image", + "image_path": "29e17118daecdb0236c798256cd130694fef900820dada6615bbaac98b8d8473.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 482, + 503, + 488, + 509 + ], + "blocks": [ + { + "bbox": [ + 482, + 503, + 488, + 509 + ], + "lines": [ + { + "bbox": [ + 482, + 503, + 488, + 509 + ], + "spans": [ + { + "bbox": [ + 482, + 503, + 488, + 509 + ], + "type": "image", + "image_path": "89d0d7a3c21c206e56aaaa11c9c5e7d79f945c9b65cae8a8e0fec0f8e1f86c4a.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "bbox": [ + 378, + 511, + 497, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 511, + 497, + 523 + ], + "spans": [ + { + "bbox": [ + 378, + 511, + 
497, + 523 + ], + "type": "text", + "content": "[0, 81] A little girl moves back as a beluga whale approaches her face." + } + ] + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 380, + 533, + 391, + 540 + ], + "blocks": [ + { + "bbox": [ + 380, + 533, + 391, + 540 + ], + "lines": [ + { + "bbox": [ + 380, + 533, + 391, + 540 + ], + "spans": [ + { + "bbox": [ + 380, + 533, + 391, + 540 + ], + "type": "image", + "image_path": "0f31fcadc2545bde6adc001104d669a9c44c08c90a96da858abeba5c2c96a6be.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 395, + 533, + 402, + 540 + ], + "blocks": [ + { + "bbox": [ + 395, + 533, + 402, + 540 + ], + "lines": [ + { + "bbox": [ + 395, + 533, + 402, + 540 + ], + "spans": [ + { + "bbox": [ + 395, + 533, + 402, + 540 + ], + "type": "image", + "image_path": "bc10df81fb05dadf42f2023d44ad0bb57d60963ed328118c67148d5817dc5522.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 403, + 533, + 417, + 540 + ], + "blocks": [ + { + "bbox": [ + 403, + 533, + 417, + 540 + ], + "lines": [ + { + "bbox": [ + 403, + 533, + 417, + 540 + ], + "spans": [ + { + "bbox": [ + 403, + 533, + 417, + 540 + ], + "type": "image", + "image_path": "0634050c5c6c955d91463775c87e845f941b15af2cd726e113fcf6f598984ca1.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 422, + 533, + 433, + 540 + ], + "blocks": [ + { + "bbox": [ + 422, + 533, + 433, + 540 + ], + "lines": [ + { + "bbox": [ + 422, + 533, + 433, + 540 + ], + "spans": [ + { + "bbox": [ + 422, + 533, + 433, + 540 + ], + "type": "image", + "image_path": "596ed4223987ed7563cda1983d1635941fdc699dfd1a300ada8f50b7f5e44453.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "type": "image", + 
"bbox": [ + 436, + 533, + 449, + 540 + ], + "blocks": [ + { + "bbox": [ + 436, + 533, + 449, + 540 + ], + "lines": [ + { + "bbox": [ + 436, + 533, + 449, + 540 + ], + "spans": [ + { + "bbox": [ + 436, + 533, + 449, + 540 + ], + "type": "image", + "image_path": "00959a907eafab253ebdf64533fd027c7f96ad991275ff48d2062fcf5260ca42.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + }, + { + "type": "image", + "bbox": [ + 452, + 533, + 459, + 540 + ], + "blocks": [ + { + "bbox": [ + 452, + 533, + 459, + 540 + ], + "lines": [ + { + "bbox": [ + 452, + 533, + 459, + 540 + ], + "spans": [ + { + "bbox": [ + 452, + 533, + 459, + 540 + ], + "type": "image", + "image_path": "91ce2b0c7edafe9f2cdb86fd1bac7e9af617712241bb64d90283a784a86583ea.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 468, + 533, + 474, + 540 + ], + "blocks": [ + { + "bbox": [ + 468, + 533, + 474, + 540 + ], + "lines": [ + { + "bbox": [ + 468, + 533, + 474, + 540 + ], + "spans": [ + { + "bbox": [ + 468, + 533, + 474, + 540 + ], + "type": "image", + "image_path": "7505d157279275cd75e2263900d283280d6a31fed16ec40ff7e0b5246fa8b0eb.jpg" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_body" + } + ], + "index": 50 + }, + { + "type": "image", + "bbox": [ + 482, + 533, + 488, + 540 + ], + "blocks": [ + { + "bbox": [ + 482, + 533, + 488, + 540 + ], + "lines": [ + { + "bbox": [ + 482, + 533, + 488, + 540 + ], + "spans": [ + { + "bbox": [ + 482, + 533, + 488, + 540 + ], + "type": "image", + "image_path": "08bee4120d15c4cdbacba31bbe60ee9f1500ebfbdb2f97a74191f10929d65b87.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 379, + 552, + 391, + 557 + ], + "blocks": [ + { + "bbox": [ + 379, + 552, + 391, + 557 + ], + "lines": [ + { + "bbox": [ + 379, + 552, + 391, + 557 + ], + "spans": [ + { + 
"bbox": [ + 379, + 552, + 391, + 557 + ], + "type": "image", + "image_path": "11534c4770ca3e4df1da4029b256918d26cd41aad652b75a64983e9aad4afe44.jpg" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "lines": [ + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 573, + 506, + 618 + ], + "type": "text", + "content": "Figure 5: Overview of PLM-STC. Examples of spatio-temporally grounded captions from PLM-STC, the first dataset to associate each caption both with a temporal interval as well as a high-fps sequence of segmentation masks of the subject - i.e., masklets (compared to just a temporal interval or a sparse sequence of bounding boxes)." + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 395, + 552, + 402, + 557 + ], + "blocks": [ + { + "bbox": [ + 395, + 552, + 402, + 557 + ], + "lines": [ + { + "bbox": [ + 395, + 552, + 402, + 557 + ], + "spans": [ + { + "bbox": [ + 395, + 552, + 402, + 557 + ], + "type": "image", + "image_path": "17c5eb7fd14ddd780b690caf976e0b5d6fbf98b5581472641250019c0586823f.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 403, + 552, + 415, + 557 + ], + "blocks": [ + { + "bbox": [ + 403, + 552, + 415, + 557 + ], + "lines": [ + { + "bbox": [ + 403, + 552, + 415, + 557 + ], + "spans": [ + { + "bbox": [ + 403, + 552, + 415, + 557 + ], + "type": "image", + "image_path": "602809f1913f9496e798db0d1ff6265cb86eca6e2102ca6f99b6a28a2a55ecfb.jpg" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_body" + } + ], + "index": 54 + }, + { + "type": "image", + "bbox": [ + 416, + 552, + 422, + 557 + ], + "blocks": [ + { + "bbox": [ + 416, + 552, + 422, + 557 + ], + "lines": [ + { + "bbox": [ + 416, + 552, + 422, + 557 + ], + "spans": [ + { + "bbox": [ + 416, + 552, + 
422, + 557 + ], + "type": "image", + "image_path": "b28c607dd051eb3686f994e025f772f462e1f5c4155ba9d5120a264759eec9a0.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 424, + 552, + 431, + 557 + ], + "blocks": [ + { + "bbox": [ + 424, + 552, + 431, + 557 + ], + "lines": [ + { + "bbox": [ + 424, + 552, + 431, + 557 + ], + "spans": [ + { + "bbox": [ + 424, + 552, + 431, + 557 + ], + "type": "image", + "image_path": "2bb4604e65b559f08032de179856131f0797537783eeacc79f3eccd2b0f7dd37.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 434, + 552, + 440, + 557 + ], + "blocks": [ + { + "bbox": [ + 434, + 552, + 440, + 557 + ], + "lines": [ + { + "bbox": [ + 434, + 552, + 440, + 557 + ], + "spans": [ + { + "bbox": [ + 434, + 552, + 440, + 557 + ], + "type": "image", + "image_path": "97d9635db506762758a4df3dbf6aa06a6fcab3df945535f5fe1735ee297ab44f.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 443, + 552, + 449, + 557 + ], + "blocks": [ + { + "bbox": [ + 443, + 552, + 449, + 557 + ], + "lines": [ + { + "bbox": [ + 443, + 552, + 449, + 557 + ], + "spans": [ + { + "bbox": [ + 443, + 552, + 449, + 557 + ], + "type": "image", + "image_path": "ffd629f7f7e75549359fde7427e50e5d3bdbadedeb27a25b9dcc7b169a3c6815.jpg" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_body" + } + ], + "index": 58 + }, + { + "type": "image", + "bbox": [ + 452, + 552, + 459, + 557 + ], + "blocks": [ + { + "bbox": [ + 452, + 552, + 459, + 557 + ], + "lines": [ + { + "bbox": [ + 452, + 552, + 459, + 557 + ], + "spans": [ + { + "bbox": [ + 452, + 552, + 459, + 557 + ], + "type": "image", + "image_path": "86721f53456f8242f459cbc94cfdb8022a9218ecb8bb1279d08b577df5f60a33.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 59 + }, + { + "type": "image", + "bbox": [ + 468, + 552, + 474, + 557 + ], + "blocks": [ + { + "bbox": [ + 468, + 552, + 474, + 557 + ], + "lines": [ + { + "bbox": [ + 468, + 552, + 474, + 557 + ], + "spans": [ + { + "bbox": [ + 468, + 552, + 474, + 557 + ], + "type": "image", + "image_path": "167690a1116fa3a65a9272f93b7c75944cfdb4555c529306b935c42b6cabbfcc.jpg" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_body" + } + ], + "index": 60 + }, + { + "type": "image", + "bbox": [ + 482, + 552, + 488, + 557 + ], + "blocks": [ + { + "bbox": [ + 482, + 552, + 488, + 557 + ], + "lines": [ + { + "bbox": [ + 482, + 552, + 488, + 557 + ], + "spans": [ + { + "bbox": [ + 482, + 552, + 488, + 557 + ], + "type": "image", + "image_path": "ae63e4f0bb0e73421d1c086dfad90e8ed1ab8c8c8cc45e4aad8f14002bf60f93.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + } + ], + "index": 61 + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "text", + "content": "Overall, we collect 194.2K spatio-temporal captions as the first existing large-scale dense video-region captioning dataset. We convert these spatio-temporal captions into three tasks for training: RCap (194.2K): Given the video region and timestamps, the model generates a caption; RTLoc (194.2K): Given the video region and caption, the model localizes the action; and RDCap (122.3K): Given the video region, the model generates dense, localized captions. 
In total, we construct " + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "inline_equation", + "content": "194.2\\mathrm{K} + 194.2\\mathrm{K}" + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "inline_equation", + "content": "+122.3\\mathrm{K} = 522.7\\mathrm{K}" + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "text", + "content": " samples, of which " + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "inline_equation", + "content": "476.2\\mathrm{K}" + }, + { + "bbox": [ + 104, + 634, + 504, + 700 + ], + "type": "text", + "content": " are used for training and the rest for constructing" + } + ] + } + ], + "index": 63 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 711, + 249, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 711, + 249, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 711, + 249, + 722 + ], + "type": "text", + "content": "1 obtained with LLM-based tagging." + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 65 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "PLM-VideoBench. Please refer to Fig. 5 for dataset examples, Table 19 for comparison with existing datasets, Table 20 for dataset statistics and Appendix H for further details." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 106, + 210, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 210, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 210, + 118 + ], + "type": "text", + "content": "5.1 PLM-VideoBench" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 127, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 127, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 127, + 506, + 172 + ], + "type": "text", + "content": "Our high-quality human-annotated data offers VLMs to train for broader range of capabilities for holistic video understanding. However, existing video benchmarks are not adequately equipped to evaluate these. To this end, we introduce PLM-VideoBench, a novel benchmark focusing on specific activities (what) and their execution details (how) within spatio-temporal contexts (where and when)." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 178, + 504, + 309 + ], + "blocks": [ + { + "bbox": [ + 106, + 178, + 504, + 309 + ], + "lines": [ + { + "bbox": [ + 106, + 178, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 504, + 309 + ], + "type": "image", + "image_path": "87fd1bd30b14c09e549e689f3d4cafff2807a92481fbf1c5bf4db17ddcec181b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "lines": [ + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 504, + 346 + ], + "type": "text", + "content": "Figure 6: PLM-Video Dataset includes fine-grained video QA (FGQA), open-ended QA in videos recorded using smart glasses (SGQA), Spatio-Temporal Captions (STC) post-processed into video region captioning (RCap), video region temporal localization (RTLoc) and video region dense captioning (RDCap) tasks." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": "Fine-Grained Question Answering (FGQA). In this task, a model must answer a multiple-choice question (MCQ) that probes nuanced, fine-grained activity understanding (e.g., painting \"vertically\" vs. \"horizontally\" in Fig. 6, first). We report multi-binary accuracy (MBAcc) [99] where each question is split into multiple binary choice questions. Our test set consists of 4,371 question-answer pairs. For more information, including statistics on video clips, segment duration, question types, and benchmark construction, see Table 18 and §G.2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 437, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 525 + ], + "type": "text", + "content": "Smart Glasses Question Answering (SGQA). In this task, a model must answer open-ended questions about activities and objects visible in an egocentric video stream recorded by a smart-glasses device (see Fig. 6, second). The questions are designed to simulate real-world scenarios where a user would ask for assistance from their smart glasses. We manually collect the videos using commercially available smart glasses, providing a completely new, unique dataset that reflects modern use-cases such as online AI video assistance and activity coaching. For evaluation, we use LLM-judge accuracy with an open-access model (Llama3.3 70B). The test set consists of 665 human-annotated question-answer pairs. See Appendix I for more details." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 505, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 505, + 615 + ], + "type": "text", + "content": "Video Region Captioning (RCap). In this task, a model must generate a detailed description of an event involving a subject of interest in the video. Given a region masklet and a specified time interval, the model is required to output a caption that accurately describes the event occurring within that interval. Compared to traditional video captioning [125, 83, 84] where the aim is to generate a video-level caption, the goal is to generate a region-level caption tied to a specific subject (e.g., a person, object or animal) (see Fig. 6, third). The test set contains 10,060 human-annotated instances and we report LLM-judge accuracy with Llama3.3 70B. See Appendix C.3 for details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 624, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 505, + 723 + ], + "type": "text", + "content": "Region Temporal Localization (RTLoc). In this task, a model must identify the precise time interval within the video when the specified event takes place for the given subject. Given a video, a region masklet and a text description of the event, the model is required to output the start and end timestamps that correspond to the occurrence of the event (see Fig. 6 fourth). Notably, this task is the inverse of RCap — instead of generating the caption, the model receives it as input and generates the corresponding time interval. We filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap. 
We report average recall@1 over IoU thresholds (0.3, 0.5, 0.7, 0.9). See Appendix C.3 for details." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 161 + ], + "type": "text", + "content": "Region Dense Video Captioning (RDCap). In this task, a model must generate a detailed description of all events involving a specific subject of interest (e.g., person, animal, or object) in a video. Given a video and a region masklet, the model must produce a sequence of (start, end, caption) tuples that cover the entire duration of the video, including periods when the subject is not visible (see Fig. 6, last). This task is a composition of RTLoc and RCap, requiring the model to produce both temporal windows for events as well as captions directly from the video. The test set contains 2,620 samples and we report the SODA score [126] which uses an LLM judge. See Appendix C.3 for details." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 175, + 192, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 192, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 192, + 189 + ], + "type": "text", + "content": "6 Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 199, + 504, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 504, + 232 + ], + "type": "text", + "content": "We first overview the baselines and evaluation setting (§6.1). We then compare benchmark results of PLMs with the baselines on a broad collection of image (§6.2) and video (§6.3) tasks as well as on our PLM-VideoBench (§6.4). Finally, we provide analyses on data and model ablations (§6.5)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 239, + 155, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 155, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 155, + 251 + ], + "type": "text", + "content": "6.1 Setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 259, + 370, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 370, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 370, + 271 + ], + "type": "text", + "content": "We compare PLMs against the following two classes of baselines:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 274, + 504, + 331 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "type": "text", + "content": "- Proprietary models such as GPT-4o [33] (gpt-4o-2024-11-20), Gemini-Pro 1.5 [34] and Gemini-Flash 2.0 [35]. We use API calls to evaluate these models." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 298, + 504, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 298, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 504, + 331 + ], + "type": "text", + "content": "- Open-access models such as Molmo-O [11], LLaVA-OneVision [28], Qwen2.5-VL [106] and InternVL2.5 [10] — state-of-the-art open-access models, for which model scale, architecture and inference code are available. We use the official inference code for all models." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 340, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 504, + 396 + ], + "type": "text", + "content": "Inference protocol. For mask inputs in PLM-VideoBench, we overlay a colored box on the video frames to specify the regions. We report validation set performance unless specified (in brackets) under the benchmark name. Metrics marked with " + }, + { + "bbox": [ + 104, + 340, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 104, + 340, + 504, + 396 + ], + "type": "text", + "content": " use LLM as a judge. Complete implementation details including inference hyper-parameters, task prompts, judge prompts and proprietary model evaluation protocol can be found in Appendix C.4." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 403, + 243, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 243, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 243, + 415 + ], + "type": "text", + "content": "6.2 Image Benchmark Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 418, + 506, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 506, + 473 + ], + "type": "text", + "content": "We evaluate PLM on a total of 20 image benchmarks. Charts, Diagrams and Documents: answer questions that require parsing images of documents and diagrams; Image Captioning: generate a short/detailed caption, Perception and Reasoning: answer questions of varying difficulty about objects, actions, functional correspondence, multi-view reasoning, spatial layout etc. and Hallucination: evaluate robustness to hallucinated details. More details are in Appendix C.1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 478, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 504, + 502 + ], + "type": "text", + "content": "Table 3 shows our results. Overall, PLM shows strong performance on a wide spectrum of image benchmarks with solely from open-access data with a white-box data engine. Additionally, we report" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 106, + 515, + 504, + 709 + ], + "blocks": [ + { + "bbox": [ + 106, + 515, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 106, + 515, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 504, + 709 + ], + "type": "table", + "html": "
ModelCharts, Diagrams and DocumentsPerception and ReasoningHard PerceptionHalluc.
DocVQA (test) acc [53]CharQA acc [54]TextVQA acc [52]InfoQA (test) acc [56]AL2D (n/o mask) acc [55]OCR-Bench acc [57]MMMU (rval) acc [37]VQA2 (rval) acc [111]OK-VQA acc [39]VizWiz acc [40]SEED (image) acc [58]BLINK (multi-image) acc [44]CV-Bench acc [19]RealWorldQA acc [45]VSR acc [127]POPE acc [68]
GPT-4o [33]92.8*85.7*75.380.7*94.2*81070.7*-63.9-77.1*68.0*72.573.978.087.2*
Gemini 1.5 Pro [35]94.084.274.881.0*95.783063.2-63.9-77.859.881.066.376.188.2*
Gemini 2.0 Flash [35]93.084.880.281.094.079269.9*-57.8-77.064.482.371.974.8-
1B scale
Qwen2VL-2B [30]90.1*75.380.365.5*84.6*809*41.1*80.059.767.472.944.4*17.362.6*73.087.2
InternVL2.5-1B [10]84.8*75.9*72.0*56.0*77.8*785*40.9*72.251.547.471.342.442.158.365.490.2
PLM-1B90.778.682.163.084.980734.881.761.059.776.346.873.867.168.888.4
3B scale
Qwen2.5 VL-3B [106]93.9*83.179.3*77.1*90.2797*53.1*80.863.271.973.147.6*54.465.4*78.588.2
InternVL2.5-4B [10]91.6*84.0*79.372.1*90.5*828*52.3*80.964.061.875.650.8*55.964.680.091.0
PLM-3B93.884.384.374.690.983041.284.366.864.078.555.481.472.480.488.7
8B scale
Molmo-7B-O [11]90.8*80.4*80.4*70.0*90.7*-39.3*85.3*-----67.5*--
LLaVA-OV-7B [28]86.780.077.368.890.165648.983.569.663.476.449.475.066.778.189.2
Qwen2.5VL-7B [106]95.7*87.3*84.9*82.6*93.0864*58.6*70.161.073.573.256.4*11.969.880.387.2
InternVL2.5-8B [10]93.0*84.8*79.377.6*92.8*82356.0*80.669.264.377.654.8*53.970.1*80.090.6*
PLM-8B94.685.586.580.992.787046.185.669.667.079.356.081.375.082.889.9
", + "image_path": "62e5bca9a4bdf80bc199e8421fe81eaf8f45fa81c53e4ab87f0912b80879803e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 713, + 504, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 713, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 713, + 504, + 734 + ], + "type": "text", + "content": "Table 3: Image benchmarks. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature, and the remaining are reproduced using official code." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 64, + 504, + 258 + ], + "blocks": [ + { + "bbox": [ + 109, + 64, + 504, + 258 + ], + "lines": [ + { + "bbox": [ + 109, + 64, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 109, + 64, + 504, + 258 + ], + "type": "table", + "html": "
ModelVCap.Video QAFine-grained Video QAT.Loc.Halluc.
DREAM-1K F/F [86]MVBench acc [70]NEX-TQA acc [69]PerceptionTest (test) acc [71]STAR acc [72]Video-MME acc [75]ActivityNet-QA acc [76]EgoSchemas (test) acc [90]TemporalBench MBA acc [99]TOMATO MBO acc [100]MotionBench (dev) acc [101]TempCompass (MCC) acc [102]CG-Bench (clue) acc [97]Charades-STA mOU [113]VideoHallucer overall acc [88]EventHallusion (binary) acc [89]
Proprietary
GPT-4o [33]-64.6*79.1-70.471.9*-72.2*38.5*37.7*55.974.558.3*38.656.491.9*
Gemini 1.5 Pro [35]-60.5*81.665.9-75.0*56.7*71.2*34.732.056.175.650.1*34.256.080.9
Gemini 2.0 Flash [35]-60.781.9--70.3*-71.5*27.632.856.176.947.0*29.860.181.6
1B scale
Qwen2VL-2B [30]26.863.2*76.453.9*67.355.6*38.427.013.125.746.962.342.80.334.959.9
InternVL2.5-1B [10]27.764.874.359.473.050.3*60.755.727.725.045.056.440.90.831.038.9
PLM-1B34.370.180.372.783.749.262.560.418.225.552.264.643.655.249.279.5
3B scale
Qwen2.5 VL-3B [106]20.367.076.866.9*63.061.5*59.264.8*17.223.549.263.045.738.8*45.253.5
InternVL2.5-4B [10]29.271.782.567.977.262.3*64.166.623.727.452.765.252.08.449.666.3
PLM-3B37.474.783.479.384.854.966.266.923.430.960.469.347.257.755.576.5
8B scale
LLaVA-OV-7B [28]28.057.181.058.166.057.760.545.419.527.653.767.841.212.134.761.1
Qwen2.5VL-7B [106]23.369.6*80.070.5*68.165.5*63.765.0*24.524.651.171.7*49.843.6*50.161.1
InternVL2.5-8B [10]28.572.685.568.9*77.664.2*66.166.2*24.329.453.568.3*53.114.357.160.2
PLM-8B35.977.184.182.784.958.367.368.828.333.261.472.746.458.657.777.3
", + "image_path": "52ea6fb7910ba74270ecfac5de8092c609865cd4806fe73566a5fb07f843d9bf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": "Image Grounding task results on RefCOCO/+/g [65] datasets in Appendix Table 14, and show that PLM outperforms both specialist models as well as the VLM baselines in all model scales." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 324, + 241, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 241, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 241, + 335 + ], + "type": "text", + "content": "6.3 Video Benchmark Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 340, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 417 + ], + "type": "text", + "content": "We evaluate PLM on a total of 25 video benchmarks. We divide these into the following categories. Video Captioning: generate a short caption for a video, or a dense description of all events; Short video QA: answer a question about a short video (few seconds to a minute), either by selecting from a list of options, or providing a free-form answer; Long video QA: answer a question as before, about a much longer video (minutes to hours); Fine-grained QA: answer detailed questions about spatial location, motion, temporal information etc.; and Hallucination: evaluate the robustness of video models to hallucinated details about objects and events." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 422, + 505, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 505, + 456 + ], + "type": "text", + "content": "Table 4 shows video captioning, video QA, fine-grained video QA, and video hallucination results. We achieve strong results on widely adopted benchmarks, despite only using open-access data mix free from proprietary model artifacts, outperforming both the open-access and proprietary models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "type": "text", + "content": "Further, we achieve competitive performance on the majority of challenging benchmarks, such as EgoSchema (68.8 %), MotionBench (61.4 %), TOMATO (33.2 %), TempCompass (72.7 %), TemporalBench (28.3 &), Charades-STA (58.6 %), and more. All our model scales show strong performance against both proprietary models as well as open-access baselines of same scale." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 510, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 504, + 543 + ], + "type": "text", + "content": "Lastly, we also show that PLMs at all scale greatly outperform existing approaches on captioning tasks and hallucination detection tasks, owing to our focus on detailed, fine-grained spatio-temporal annotations in our human-annotated data collection." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 553, + 241, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 553, + 241, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 241, + 563 + ], + "type": "text", + "content": "6.4 PLM-VideoBench Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 569, + 307, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 307, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 307, + 723 + ], + "type": "text", + "content": "We report the result on our proposed benchmark PLM-VideoBench from §5.1 in Table 5. We evaluate our PLM as well as (proprietary and open-access) baselines. In addition, we provide human performance of each subtask in the first row. The results show a significant gap between the baselines and PLM. Proprietary baselines and open-source baselines alike perform reasonably on FGQA tasks, though still 6.5 points lower than PLM (61.2 vs 67.7). On SGQA, where the video sources and the question-answer pairs are unseen to all models, PLM performs reasonably well, yet 2.1 points short from open-access best (InternVL2.5) and far from the best proprietary model" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 316, + 552, + 503, + 688 + ], + "blocks": [ + { + "bbox": [ + 104, + 262, + 504, + 284 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 284 + ], + "type": "text", + "content": "Table 4: Video benchmark results. PLM versus proprietary models and open-access baselines of comparable scale. Cells with * are reported numbers from literature and the remaining are reproduced using official code." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 552, + 503, + 688 + ], + "lines": [ + { + "bbox": [ + 316, + 552, + 503, + 688 + ], + "spans": [ + { + "bbox": [ + 316, + 552, + 503, + 688 + ], + "type": "table", + "html": "
ModelFQQA MBAccSGQAc+†RDCap SDAD†RCap score†RTLoc meanRAvg.
Human perf.90.967.966.653.967.873.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
Open-access
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
", + "image_path": "88f1750edc60d7fca24ce4b6116dcb56895c0a14ae4539375ffb52be7846b390.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 693, + 504, + 723 + ], + "lines": [ + { + "bbox": [ + 312, + 693, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 312, + 693, + 504, + 723 + ], + "type": "text", + "content": "Table 5: PLM-VideoBench results. We evaluate PLM against baselines and report breakdowns. We report human performance in the first row." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "(GPT-4o). On spatio-temporal tasks (RDCap, DCap, RTLoc), open source baselines are unable to perform grounded reasoning and default to repeating the same caption for every time interval. Proprietary models perform reasonably well, yet far from the human performance. In all sub-tasks of PLM-VideoBench, PLM shows competitive performance compared to proprietary and open-access baselines. Results for all model scales are in Appendix D." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 133, + 504, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 504, + 166 + ], + "type": "text", + "content": "Note that the human performance varies based on the nature of the task and evaluation metrics. For example, FGQA human scores are naturally higher than RCap because the task is structured (select the correct option vs. open-ended) and the metric is objective (accuracy vs. LLM-judge accuracy)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 178, + 201, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 201, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 201, + 190 + ], + "type": "text", + "content": "6.5 Ablation Studies" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 198, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 266 + ], + "type": "text", + "content": "Setup. We perform an ablation study to assess the importance of each of our proposed data, both synthetic and human-annotated. We start with PLM 3B after stage 2 training, and finetune on 4M short image and video SFT data mix " + }, + { + "bbox": [ + 104, + 198, + 506, + 266 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 104, + 198, + 506, + 266 + ], + "type": "text", + "content": " for the data ablation. We evaluate and report average video benchmark performance across five categories — video captioning, short video QA, fine-grained QA, and video hallucination, as well as spatial and temporal tasks, PLM-VideoBench and three image categories — image OCR, image captioning, and image perception. Full details are in Appendix A.3." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 113, + 276, + 397, + 373 + ], + "blocks": [ + { + "bbox": [ + 113, + 276, + 397, + 373 + ], + "lines": [ + { + "bbox": [ + 113, + 276, + 397, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 276, + 397, + 373 + ], + "type": "table", + "html": "
PLM-Synth.PLM-STCPLM-FGQATotal AveragePLM-VideoBenchVideo TasksImage Tasks
PLM-FGQAMBaccPLM-SGQAacc†3 metric avg.Fine-Grained QA5 benchmark avg.Video Cap.Dream 1KVideo QA5 benchmark avg.Video Hallu.2 benchmark avg.Spatial&Temp.4 benchmark avg.Image OCR6 benchmark avg.Image Cap.3 benchmark avg.Image Rec.5 benchmark avg.
XXX48.539.734.46.642.224.067.564.950.676.064.363.3
XX54.349.835.914.748.829.973.273.356.184.065.965.5
X57.949.936.242.148.632.373.974.262.983.867.565.0
X56.762.943.215.250.130.474.176.358.383.764.065.6
61.263.644.042.250.234.374.676.364.383.774.265.4
", + "image_path": "2bcd6ae87dc81ee9d74711aff2bc23d783e6471cd8e696a0f03e4b894bb0b5b6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 375, + 406, + 416 + ], + "lines": [ + { + "bbox": [ + 104, + 375, + 406, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 406, + 416 + ], + "type": "text", + "content": "Table 6: Ablation. We show the impact of individual data components in PLM training. For this ablation, we use a reduced the SFT datamix consists of 4M open-access image and video data. Results are aggregated validation-set performance over selected benchmarks in each category of tasks, details in Appendix A.3." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 422, + 289, + 493, + 388 + ], + "blocks": [ + { + "bbox": [ + 422, + 289, + 493, + 388 + ], + "lines": [ + { + "bbox": [ + 422, + 289, + 493, + 388 + ], + "spans": [ + { + "bbox": [ + 422, + 289, + 493, + 388 + ], + "type": "image", + "image_path": "458aaf373773ebd260cfcb625ce01d51d1384862292a9f919b559e5e0e8baf6b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 395, + 505, + 416 + ], + "lines": [ + { + "bbox": [ + 410, + 395, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 410, + 395, + 505, + 416 + ], + "type": "text", + "content": "Figure 7: HardQA improves with PLM data." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "type": "text", + "content": "Discussion. First, we observe that stage 2 synthetic data training boosts model performance across the board. 
Moreover, adding our PLM-STC data further improves a variety of benchmarks, including PLM-STC (+27.4 points), video captioning (+2.4 points), and most importantly, spatial and temporal tasks (+6.8 points). Adding our PLM-FGQA data improves a distinct set of categories for fine-grained activity understanding; PLM-FGQA (+13.1 points), PLM-SGQA (+7.3 points), Fine-grained video tasks (+1.3 points), video hallucination tasks (+3.0 points), and spatial and temporal tasks (+2.2 points). Using our human-annotated data altogether results in the best performance overall. Further in Fig.7, we show that our human-annotated data improves upon HardQA [97, 100, 89, 101, 99, 113, 92], effectively addressing the limitations of synthetic data discussed in §4.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 548, + 185, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 185, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 185, + 560 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 573, + 506, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 573, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 573, + 506, + 661 + ], + "type": "text", + "content": "This work presents Perception Language Model (PLM), a fully-reproducible vision-language model to transparently tackle visual perception tasks without distillation of private black-box models. We trained PLM using data from existing open-access datasets and synthetic samples generated by our data engine. We identified gaps in detailed video understanding capabilities that cannot be filled with synthetic data. In response, we collected 2.8M human-labels for fine-grained video question answering and spatio-temporally grounded captioning, and created a new benchmark, PLM-VideoBench, to evaluate these capabilities. 
We hope our open dataset, benchmark, and models will foster transparent research in visual perception." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 710, + 501, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 501, + 722 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 501, + 722 + ], + "type": "text", + "content": "23.8M datamix: TextQA 500K, Image QA 2.8M, and Video QA 500K. Each detail can be found in Tab. 9." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 261, + 69, + 351, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 69, + 351, + 94 + ], + "spans": [ + { + "bbox": [ + 261, + 69, + 351, + 94 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 102, + 217, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 102, + 217, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 217, + 117 + ], + "type": "text", + "content": "Table of Contents" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 121, + 482, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 121, + 482, + 134 + ], + "spans": [ + { + "bbox": [ + 129, + 121, + 482, + 134 + ], + "type": "text", + "content": "A PLM Training Details 12" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 135, + 481, + 170 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 143, + 135, + 481, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 135, 
+ 481, + 146 + ], + "spans": [ + { + "bbox": [ + 143, + 135, + 481, + 146 + ], + "type": "text", + "content": "A.1 PLM Training Setting 12" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 147, + 481, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 147, + 481, + 158 + ], + "spans": [ + { + "bbox": [ + 143, + 147, + 481, + 158 + ], + "type": "text", + "content": "A.2 PLM Training Datamix 13" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 159, + 481, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 159, + 481, + 170 + ], + "spans": [ + { + "bbox": [ + 143, + 159, + 481, + 170 + ], + "type": "text", + "content": "A.3 Ablation Experiment Details 14" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 178, + 481, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 178, + 481, + 191 + ], + "spans": [ + { + "bbox": [ + 129, + 178, + 481, + 191 + ], + "type": "text", + "content": "B Synthetic Scaling Experiments 14" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 200, + 481, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 200, + 481, + 211 + ], + "spans": [ + { + "bbox": [ + 129, + 200, + 481, + 211 + ], + "type": "text", + "content": "C VLM Benchmark Details 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 213, + 481, + 259 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 143, + 213, + 481, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 481, + 223 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 481, + 223 + ], + "type": "text", + "content": "C.1 Image Benchmarks 16" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 224, + 481, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 224, + 481, + 236 + ], + "spans": [ + { + "bbox": [ + 143, + 224, + 
481, + 236 + ], + "type": "text", + "content": "C.2 Video Benchmarks 17" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 236, + 481, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 236, + 481, + 247 + ], + "spans": [ + { + "bbox": [ + 143, + 236, + 481, + 247 + ], + "type": "text", + "content": "C.3 PLM-VideoBench 17" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 248, + 481, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 248, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 143, + 248, + 481, + 259 + ], + "type": "text", + "content": "C.4 Evaluation Protocols 18" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 269, + 481, + 301 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 129, + 269, + 481, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 269, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 129, + 269, + 481, + 281 + ], + "type": "text", + "content": "D Additional PLM-VideoBench Results 19" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 289, + 481, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 289, + 481, + 301 + ], + "spans": [ + { + "bbox": [ + 129, + 289, + 481, + 301 + ], + "type": "text", + "content": "E Baseline Implementation Details 19" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 311, + 481, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 311, + 481, + 322 + ], + "spans": [ + { + "bbox": [ + 129, + 311, + 481, + 322 + ], + "type": "text", + "content": "F Additional Results 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 143, + 323, + 481, + 370 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 143, + 323, + 481, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 143, + 323, + 481, + 334 + ], + "spans": [ + { + "bbox": [ + 143, + 323, + 481, + 334 + ], + "type": "text", + "content": "F.1 Comparison with LLaMA-3V 20" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 143, + 335, + 481, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 335, + 481, + 346 + ], + "spans": [ + { + "bbox": [ + 143, + 335, + 481, + 346 + ], + "type": "text", + "content": "F.2 Image Captioning 20" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 143, + 347, + 481, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 347, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 143, + 347, + 481, + 357 + ], + "type": "text", + "content": "F.3 Image Grounding 21" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 143, + 358, + 481, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 358, + 481, + 370 + ], + "spans": [ + { + "bbox": [ + 143, + 358, + 481, + 370 + ], + "type": "text", + "content": "F.4 Long Video Understanding 21" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 380, + 481, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 380, + 481, + 391 + ], + "spans": [ + { + "bbox": [ + 129, + 380, + 481, + 391 + ], + "type": "text", + "content": "G PLM-FGQA: Fine-grained QA 22" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 143, + 392, + 481, + 415 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 143, + 392, + 481, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 392, + 481, + 403 + ], + "spans": [ + { + "bbox": [ + 143, + 392, + 481, + 403 + ], + "type": "text", + "content": "G.1 Annotation process: Data Engine 22" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 143, + 403, + 481, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 403, + 481, + 415 + ], + "spans": [ + { + 
"bbox": [ + 143, + 403, + 481, + 415 + ], + "type": "text", + "content": "G.2 FGQA PLM-VideoBench Construction 27" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 425, + 481, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 425, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 129, + 425, + 481, + 435 + ], + "type": "text", + "content": "H PLM-STC Details 28" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 143, + 437, + 481, + 459 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 143, + 437, + 481, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 437, + 481, + 447 + ], + "spans": [ + { + "bbox": [ + 143, + 437, + 481, + 447 + ], + "type": "text", + "content": "H.1 Annotation Process 28" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 143, + 449, + 481, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 449, + 481, + 459 + ], + "spans": [ + { + "bbox": [ + 143, + 449, + 481, + 459 + ], + "type": "text", + "content": "H.2 PLM-STC Benchmark 30" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 469, + 481, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 469, + 481, + 480 + ], + "spans": [ + { + "bbox": [ + 129, + 469, + 481, + 480 + ], + "type": "text", + "content": "I Smart Glasses Data 30" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 143, + 482, + 481, + 504 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 143, + 482, + 481, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 482, + 481, + 492 + ], + "spans": [ + { + "bbox": [ + 143, + 482, + 481, + 492 + ], + "type": "text", + "content": "I.1 Data collection and annotation 30" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 143, + 494, + 481, + 504 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 143, + 494, + 481, + 504 + ], + "spans": [ + { + "bbox": [ + 143, + 494, + 481, + 504 + ], + "type": "text", + "content": "I.2 SGQA Benchmark 31" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 514, + 481, + 588 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 129, + 514, + 481, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 514, + 481, + 525 + ], + "spans": [ + { + "bbox": [ + 129, + 514, + 481, + 525 + ], + "type": "text", + "content": "J Synthetic Data Engine 31" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 129, + 535, + 481, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 535, + 481, + 546 + ], + "spans": [ + { + "bbox": [ + 129, + 535, + 481, + 546 + ], + "type": "text", + "content": "K Qualitative Results 35" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 129, + 555, + 481, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 555, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 129, + 555, + 481, + 567 + ], + "type": "text", + "content": "L Limitations and Future Work 39" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 129, + 577, + 481, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 577, + 481, + 588 + ], + "spans": [ + { + "bbox": [ + 129, + 577, + 481, + 588 + ], + "type": "text", + "content": "M Broader Impact 39" + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 242, + 
85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 242, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 242, + 85 + ], + "type": "text", + "content": "A PLM Training Details" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 106, + 101, + 504, + 300 + ], + "blocks": [ + { + "bbox": [ + 106, + 101, + 504, + 300 + ], + "lines": [ + { + "bbox": [ + 106, + 101, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 504, + 300 + ], + "type": "image", + "image_path": "edd4dba1e3710253faa97eb20998e70076ec12a6c3c6fb22067fbab64044c139.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "lines": [ + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "spans": [ + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "text", + "content": "Figure 8: The figure provides an overview of the datasets used in the paper. PLM is trained with " + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "inline_equation", + "content": "47.8M" + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "text", + "content": " synthetic image and " + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "inline_equation", + "content": "18.4M" + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "text", + "content": " synthetic video, and " + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "inline_equation", + "content": "2.9M" + }, + { + "bbox": [ + 104, + 305, + 506, + 359 + ], + "type": "text", + "content": " human-labeled video samples. Our data enables PLM to perform a variety of tasks, including standard tasks like Image, Multi-image, and Video QA, as well as new video tasks such as Fine-grained QA (FGQA), Region Temporal Localization (RTLoc), Region Captioning (RCap), and Region Detailed Captioning (RDCap)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 368, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 504, + 403 + ], + "type": "text", + "content": "In this section, we describe the training details of PLM. In §A.1 we describe exact details of training setting such as hyper-parameters and implementation details. In §A.2 we describe our datamix for both synthetically generated and human-annotated parts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 414, + 228, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 228, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 228, + 426 + ], + "type": "text", + "content": "A.1 PLM Training Setting" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "content": "For all three stages, we use AdamW optimizer [128] with weight decay of 0.05 and use FSDP [129] with FlashAttention2 [130] for overall implementation based on PyTorch [131]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "text", + "content": "Stage 1 training. In stage 1, we use a subset of SA-1B [105] paired with detailed captions generated by our data engine (§4.1). We use total 1M samples to train PLM with next token prediction loss, with vision encoder and LLM parameters frozen. This stage is commonly known as warm-up stage. 
We use learning rate " + }, + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "text", + "content": " for all model scale with global batch size of 512 and " + }, + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 104, + 468, + 506, + 534 + ], + "type": "text", + "content": " resolution. We use the Perception Encoder [104] L/14 variant for the 1B and 3B PLM models, and the G/14 variant for the 8B PLM model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": "Stage 2 training. In Stage 2, we train on a total of 72.5M samples. Of these, 66M consist of images and videos with synthetically generated annotations produced by our data engine. The remaining 6.5M samples are a subset of human-annotated images and videos from open-source datasets, which are included in our final datamix described in §A.2. We train with global batch size of 2048, learning rate of " + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "inline_equation", + "content": "4 \\times 10^{-5}" + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": ", weight decay of 0.05 for the full set of parameters (vision encoder, projector, and LLM). For both image and video input, we use " + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": " resolution for each tile/frame, which effectively generate 1024 vision tokens. 
We apply " + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": " spatial average pooling to reduce this to 256. We use dynamic tiling with a thumbnail to support any resolution and aspect ratio, similar to prior work [12], and uniform sampling of video frames after preprocessing the videos to 1 fps. We set the maximum number of tiles/frames to be 16, which results in maximum of " + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "inline_equation", + "content": "(16 + 1) \\times 256 = 4352" + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "inline_equation", + "content": "16 \\times 256 = 4096" + }, + { + "bbox": [ + 104, + 545, + 506, + 679 + ], + "type": "text", + "content": " vision tokens respectively for images and videos. We train the model with a sequence length of 6144 allowing a maximum of 2048 tokens for the text modality." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Stage 3 training. In stage 3, we use total of 19.1M high-quality datamix spanning over multiple image, video, and text modalities. We describe this datamix in §A.2. 
In this stage, we use global batch size of 1024, learning rate of " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " for 8B and " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "4 \\times 10^{-5}" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " for 1B and 3B PLM models. We" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "train the full set of parameters for all scales. 
Similar to stage 2, we adapt dynamic tiling and uniform frame sampling for up to 36 tiles for image and 32 frames for video, with " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " spatial average pooling, which generates " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "(36 + 1) \\times 256 = 9472" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " vision tokens for image and " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "32 \\times 256 = 8192" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " vision tokens for video. For all modalities, we use 11264 maximum training sequence length." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 142, + 234, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 234, + 155 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 234, + 155 + ], + "type": "text", + "content": "A.2 PLM Training Datamix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 277 + ], + "type": "text", + "content": "Table 9 presents the full data mix used across all training stages apart from our manually collected data in §5. This contains annotations from existing public datasets as well as synthetically generated data (see §4). We filter and include a wide variety of existing datasets spanning across images (captioning, QA, grounding), videos (captioning, QA, temporal localization, region captioning and dense captioning) and text-only datasets to preserve the text-instruction following capabilities of our model. 
Most importantly, we filter out every dataset that contains annotations generated by proprietary models. Table 7 and Table 8 shows the exact number of samples for each datasets in Stage 2 and Stage 3 respectively. Marjory of the data in stage 2 are synthetic, with a focus on captioning samples, since they carry the dense information about the image or video. In stage 3, we have one third of the data, mostly focusing on human annotated samples, covering a large variety of tasks." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 297, + 495, + 628 + ], + "blocks": [ + { + "bbox": [ + 106, + 297, + 495, + 628 + ], + "lines": [ + { + "bbox": [ + 106, + 297, + 495, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 495, + 628 + ], + "type": "table", + "html": "
DatasetNum SamplesTypeDatasetNum SamplesType
Image SyntheticImage Synthetic
PDFAcc (QA) [132]12MQAPDFAcc (QA) [132]2MQA
PDFAcc (Cap) [132]12MCap.ArxivCap [134]1.5MCap./QA
UCSF [133]6MQASA1B [105]800KCap.
ArxivCap [134]1.8MCap./QAObject365 [135]300KCap.
SA1B [105]10MCap.OpenImages [136]300KCap.
Object365 [135]3.5MCap.DocVQA [53]100KQA
OpenImages [136]1.8MCap.InfographicVQA [56]50KQA
DocVQA [53]50KQAPixmoCap [11]500KCap
InfographicVQA [56]20KQAVideo Synthetic
PixmoCap [11]600KCapYT-1B (QA) [137]300KMCQA
Video SyntheticEgo4D (Cap.) [115]180KCap.
YT-1B (Cap.) [137]14MCap.Ego4D (QA) [115]700KQA
YT-1B (QA) [137]3MMCQASpoken Moments [138]449KCap.
Ego4D (Cap.) [115]180KCap.Charades [139]8KCap.
Ego4D (QA) [115]700KQAKinetics710 [121]40KCap.
Spoken Moments [138]449KCap.DiDeMo [140]7.5KCap.
Charades [139]8KCap.Text Synthetic
Kinetics710 [121]40KCap.NaturalReasoning [141]1MQA
DiDeMo [140]7.5KCap.Human Annotated
Text SyntheticImage QA [9]2.8MQA
NaturalReasoning [141]1MQAImage Cap [9]36KQA
Human AnnotatedImage Grnd. [9]1.4MQA
Image QA [9]2.8MQAImage Misc. [9]1.4MQA
Video QA [9]570KQAVideo QA [9]570KQA
Video TL [9]16KTemp. Loc.Video Cap. [9]315KQA
Video Dense Cap. [9]10KDense Cap.Video TL [9]16KTL
Text QA [9]2MMixVideo Dense Cap. [9]10KDCap.
Total72.5MVideo Region Captioning [9]15KCap.
", + "image_path": "ab9bd49c00e0ac2c61fb94363fcc5cec51c4b1cbfe090cb6a415f58a3eb577ea.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 629, + 266, + 641 + ], + "lines": [ + { + "bbox": [ + 105, + 629, + 266, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 629, + 266, + 641 + ], + "type": "text", + "content": "Table 7: PLM Stage 2 training data mix." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 318, + 683, + 481, + 695 + ], + "lines": [ + { + "bbox": [ + 318, + 683, + 481, + 695 + ], + "spans": [ + { + "bbox": [ + 318, + 683, + 481, + 695 + ], + "type": "text", + "content": "Table 8: PLM Stage 3 training data mix." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 70, + 194, + 338 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 194, + 338 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 194, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 194, + 338 + ], + "type": "table", + "html": "
DatasetSize
DVQA [142]222222
PlotQA [143]157070
MapQA [144]42761
OCRVQA [145]167646
Localized Narratives [146]199998
FigureQA [147]119999
Hateful Memes [148]9713
CLEVR [149]73181
CLEVR v.0 [149]70000
IconQA [150]116514
TextVQA [112]21953
GeomVerse [151]11162
RobuT (wikiqsl) [152]80757
WebSight [153]10000
Visual7W [154]15961
TallyQA [155]100050
Robust (WTO) [152]42495
DaTik [156]47974
CocoQA [157]46287
ChartQA [109]27395
VQAv2 [111]82772
Chart2Text [158]35946
VisText [159]35995
FinQA [160]5276
DocVQA [53]12089
STVQA [161]18684
TAT-QA [162]2199
RenderedText [163]10435
RAVEN [164]31418
IAM [165]7549
A-OKVQA [39]17720
TabMWP [166]45439
CocoQA [157]9009
TextCaps [167]21953
Screen2Words [168]16713
VSR [169]2157
TQA [170]9742
Robust (SQA) [152]12769
VisualMRC [171]3027
ScienceQA [61]9947
VQA-RAD [172]313
InfographicVQA [56]2118
Hitab [173]4995
AI2D [55]4863
Inter-GPS [174]2555
diagram_image_to_text [175]595
MIMIC-IT (CGD) [176]70539
MultiHiert [177]15233
NLVR2 [178]136799
RAVEN (Multi-image) [164]56081
SpotTheDiff [179]19340
", + "image_path": "cd1b284ae8d1f3f30f8088b18b59622d25aa293d59cf4db1f85924f860087e5e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 198, + 70, + 302, + 339 + ], + "blocks": [ + { + "bbox": [ + 198, + 70, + 302, + 339 + ], + "lines": [ + { + "bbox": [ + 198, + 70, + 302, + 339 + ], + "spans": [ + { + "bbox": [ + 198, + 70, + 302, + 339 + ], + "type": "table", + "html": "
DatasetSize
STAR [72]3032
NeXT-QA [69]3870
VISION [180]9900
FlinstonesSV [181]22341
ImageCoDe [182]16594
VizWiz [40]4900
MIT-States (State Coherence) [183]1900
MIT-States (Prop. Coherence) [183]1900
WebQA [184]9338
Birds-to-Words [185]14281
AESOP [186]6915
RecipeQA (Img. Coherence) [187]8699
CLEVR-Change [188]3885
IEEdit [189]3456
ChartQA [109]45820
DocVQA [53]69562
InfographicVQA [56]32661
TextVQA [112]69170
TextCaps [167]21324
VisualMRC [171]24456
WTQ [190]16885
HME100k [191]74492
chrome_writing [163]8825
OK-VQA [110]27536
GeometrySk [174]4802
VQA-RAD [172]1793
Total2796145
Image Cap.
DatasetSize
DOCCI [192]13362
DCI [193]7599
Altogether [194]15166
Total36127
Image Misc.
DatasetSize
AI2d [55]12413
COCO cap. [49]414113
GQA-Balanced [195]943000
Total1369526
", + "image_path": "516c83e5d6908944dc93b5e074a65757fe20d9b8f53f088e3a30c9295209e87e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 308, + 70, + 400, + 346 + ], + "blocks": [ + { + "bbox": [ + 308, + 70, + 400, + 346 + ], + "lines": [ + { + "bbox": [ + 308, + 70, + 400, + 346 + ], + "spans": [ + { + "bbox": [ + 308, + 70, + 400, + 346 + ], + "type": "table", + "html": "
Grounding
DatasetSize
VisualGenome [66]154792
Flickr Entities [196]296332
DCI (Region Caption) [193]304912
RefCOCO/g+/ [197]212923
VCR [60]855577
Total1398690
Image Synth.
DatasetSize
DocVQA [53]50170
InfographicVQA [56]21660
PDFAcc (Cap.) [132]12024670
PDFAcc (QA) [132]12024670
UCSF [133]5953490
ArxivCap [134]1859680
SAIB [105]9834573
Object365 [135]3484584
OpenImages [136]1740864
PixmoCap [11]584650
Total47579011
Video QA
DatasetSize
EgoQA [119]7813
NExT-QA (instruct) [69]34114
NExT-QA (MCQ) [69]34114
PerceptionTest [71]2403
ActivityNetQA [76]23530
VideoInstruct (human) [20]25803
CLEVERR (MC) [120]42620
CLEVERR (QA) [120]40000
Kinetics710 [121]39949
SVv2 (classification) [122]40000
VdLNN [123]43126
VdLNN (QA) [123]75090
How2QA [8]45731
STAR [72]35297
Memento [198]40060
Memento-MultImage [198]40060
Total569710
Video Cap.
DatasetSize
VATEX (en caption) [84]259910
Charades (caption) [139]11593
ActivityNet (captions) [125]33375
YouCook2 [83]10337
Total315215
", + "image_path": "b83cdccde56ac234bb0f2e3c8ad905aa0dd7ba0225a99cc30d9278d0de75d545.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 408, + 70, + 497, + 318 + ], + "blocks": [ + { + "bbox": [ + 408, + 70, + 497, + 318 + ], + "lines": [ + { + "bbox": [ + 408, + 70, + 497, + 318 + ], + "spans": [ + { + "bbox": [ + 408, + 70, + 497, + 318 + ], + "type": "table", + "html": "
Video Temporal Loc.
DatasetSize
HiREST [199]7919
Charades [139]7566
DiDeMo [140]435
Total15920
Video Region Captioning
DatasetSize
HC-STVG [200]10131
VidLN (UVO subset) [123]5296
Total15427
Video Dense Cap.
DatasetSize
ActivityNet [125]8859
YouCook [83]1039
Total9898
Video Synth.
DatasetSize
Spoken Moments [138]449044
Charades [139]7919
Kinetics710 [121]39949
DiDeMo [140]7566
Ego4D (Cap.) [115]183029
Ego4D (QA) [115]703935
YT-1B (Cap.) [137]14792983
YT-1B (QA) [137]3383670
Total19568095
Text-QA
DatasetSize
no robots [201]9485
MathQA [202]29837
LIMA [203]1030
GSM8k (socratic) [204]7473
GSM8k [204]7473
FLAN [205]156050
Dolly15k [206]15011
Maggie Pro (MT) [207]300000
Maggie Pro [207]300000
Total2056359
", + "image_path": "701e00966d2585d20f52c85fd4a84ee611176d8dbb3ce8ee2398f029906d406d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "type": "text", + "content": "Table 9: PLM training datamix. Our mix includes synthetic and manually annotated data across a combination of image data (QA, captioning, OCR, Visual grounding), video data (captioning, grounded captioning, dense captioning, temporal localization) and text-only data. Importantly, all data is publicly accessible, and not generated by proprietary models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 418, + 254, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 418, + 254, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 254, + 429 + ], + "type": "text", + "content": "A.3 Ablation Experiment Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 437, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 548 + ], + "type": "text", + "content": "We provide additional details about the ablation experiment in §6.5. We report benchmark average scores across 5 categories, along with the average across all of them. We select a representative set of benchmarks from the full set of image and video benchmarks in §6.2 and §6.3 that report comparable scores so the average results are meaningful. For Video captioning we select Dream 1K and report the LLM-judge score with Llama3.3 70B as judge. for Short Video QA, and Finegrained QA, we select benchmarks that report MCQ accuracy (and exclude open-ended QA). For Hallucination, we include both benchmarks. 
For Spatial and Temporal tasks, we select BLINK, CVBench, VSR, and Charades-STA. For Image Perception, we choose SEED, MMMU, VQAv2, OK-VQA, and VizWiz. We train the ablation setup of SFT with the exactly matching hyperparameters as our final run; only difference is the size of the SFT datamix." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 563, + 285, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 285, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 285, + 578 + ], + "type": "text", + "content": "B Synthetic Scaling Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 589, + 506, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 656 + ], + "type": "text", + "content": "In this section we provide additional results to the synthetic scaling experiments in §4.2. We report aggregate benchmark accuracies across three categories — Video QA, OCR QA and Image QA — by selecting representative benchmarks from each category. For VideoQA, these are STAR [72], EgoSchema [90], MVBench [70], VideoMME [75] and PerceptionTest [71]; For OCR QA, these are ChartQA [109], DocVQA [53], InfographicsQA [56], TextVQA [112] and OCRBench [57]; and for Natural Image QA, these are RealworldQA [45], OKVQA [110], VQAv2 [111], and VizWiz [40]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": "Scaling with encoder size. After investigating the impact of the LLM decoder in Fig. 2, we examine the impact of increasing the vision encoder size from 300M (PE Large) to 2B (PE Giant) for each language model scale next. In Fig. 
9, we overlay the new power-law with the 2B vision encoder (black dashed) line onto the 300M (red dashed) line. Notably, we find that the larger vision encoder " + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "inline_equation", + "content": "(300\\mathrm{M}\\rightarrow 2\\mathrm{B})" + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": " leads to greater scaling trend on video QA benchmarks. Quantitatively, the power law" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 76, + 253, + 187 + ], + "blocks": [ + { + "bbox": [ + 111, + 76, + 253, + 187 + ], + "lines": [ + { + "bbox": [ + 111, + 76, + 253, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 76, + 253, + 187 + ], + "type": "image", + "image_path": "6525a07da003b5c1fffc7c0015dafa7775dff35e2d902f016405d9ed15508a31.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 197, + 504, + 217 + ], + "lines": [ + { + "bbox": [ + 104, + 197, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 504, + 217 + ], + "type": "text", + "content": "Figure 9: Scaling with encoder size. Scaling trends of PE-G vs. PE-L vision encoders. Larger encoders scale better in Video QA tasks while similar scaling in OCR and Natural QA is seen." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 255, + 76, + 376, + 183 + ], + "blocks": [ + { + "bbox": [ + 255, + 76, + 376, + 183 + ], + "lines": [ + { + "bbox": [ + 255, + 76, + 376, + 183 + ], + "spans": [ + { + "bbox": [ + 255, + 76, + 376, + 183 + ], + "type": "image", + "image_path": "573bd6d623a8d5b13a56fb1f2ea46ca6cef2b1bfd8d35822d07f3e063f8a145a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 377, + 76, + 500, + 175 + ], + "blocks": [ + { + "bbox": [ + 377, + 76, + 500, + 175 + ], + "lines": [ + { + "bbox": [ + 377, + 76, + 500, + 175 + ], + "spans": [ + { + "bbox": [ + 377, + 76, + 500, + 175 + ], + "type": "image", + "image_path": "487a1ee41bc10230d773ecc387fbde4d501749ece8e09a761762110ba188ea0b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": "fit has improved from " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "-0.15" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "-0.19" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": ". The two lines intersect around 8B scale with PE-G, proving that 8B and larger PLM will benefit more with larger vision encoder. We use PE-L for 1B and 3B LLM scale and PE-G for 8B scale by default." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 305, + 252, + 417 + ], + "blocks": [ + { + "bbox": [ + 106, + 305, + 252, + 417 + ], + "lines": [ + { + "bbox": [ + 106, + 305, + 252, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 305, + 252, + 417 + ], + "type": "image", + "image_path": "e6fee9284c2eaca7f5a2d382d7a63ae2cdd7e4e337d5878516729ae373c966e7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 427, + 504, + 449 + ], + "lines": [ + { + "bbox": [ + 104, + 427, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 504, + 449 + ], + "type": "text", + "content": "Figure 10: Scaling with input size. Scaling trends of training with 16 tiles/frames vs. 8 tiles/frames. Higher input size scales better in Video QA and OCR QA tasks while similar trend is seen for Natural QA." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 253, + 307, + 376, + 414 + ], + "blocks": [ + { + "bbox": [ + 253, + 307, + 376, + 414 + ], + "lines": [ + { + "bbox": [ + 253, + 307, + 376, + 414 + ], + "spans": [ + { + "bbox": [ + 253, + 307, + 376, + 414 + ], + "type": "image", + "image_path": "cccf7fbf94228c5846ea07593981fd1e54fae7f5523b10ef64fbef5444020990.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 377, + 307, + 500, + 406 + ], + "blocks": [ + { + "bbox": [ + 377, + 307, + 500, + 406 + ], + "lines": [ + { + "bbox": [ + 377, + 307, + 500, + 406 + ], + "spans": [ + { + "bbox": [ + 377, + 307, + 500, + 406 + ], + "type": "image", + "image_path": "2046c732751f3301f7e0973665e26c74a9d9a78af266e79571054ff968423369.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 
487, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "text", + "content": "Scaling with input size. In Fig. 10, we show the impact of increasing the input size to VLM through higher image resolution and more video frames. In this setting, each scale of PLM trains with dynamic tiling for image input and uniform sampling for video input with maximum 8 or 16 tiles/frames per sample. In each plot, the average error of PLM trained with 16 tiles/frames are plotted. All models use " + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "text", + "content": " spatial average pooling before input to LLM, and each tile/frame has " + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "text", + "content": " resolution. Similar to Fig. 2, we show power law fit with a black dashed line, and compare to 8 tiles/frames training denoted with red dashed line. Notably, we find out that on Video QA and OCR QA benchmarks, PLM shows better scalability with training with higher input size. This means with the same FLOP counts at " + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "inline_equation", + "content": "10^{13}" + }, + { + "bbox": [ + 104, + 487, + 506, + 619 + ], + "type": "text", + "content": ", training with 16 frames makes 2.0 points of metric error lower than 8 frames counterpart (32.2 vs 30.2). Similar trends are observed with OCR QA going from 8 tiles max. to 16 tiles max. Notably, higher resolution did not make a difference for Natural QA tasks. We chose the 16 max-tiles and frames to be our final training setting for stage 2 PLM." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "type": "text", + "content": "In Fig. 11, we show the breakdown of the scaling trend shown in §4.2. “H” stands for human only (i.e., no synthetic) baseline. From the breakdown, the most notable point is the the scalability in OCR, Chart, Document QA tasks. In each benchmark, synthetic data makes more than 10 points of improvement on every model scale, compared to “no synthetic” baselines. Moreover, there is no sign of saturation; the performance will most likely improve with more synthetic data. We hypothesize that OCR, Chart, Document QA tasks reduce to “translation” task — a set of pixels has one-to-one mapping to text space. Remaining tasks exhibit clean power-law relationship between metric error and FLOPs. The last plot shows scaling trend on average over all benchmarks, which shows a close power-law relationship." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 76, + 211, + 166 + ], + "blocks": [ + { + "bbox": [ + 110, + 76, + 211, + 166 + ], + "lines": [ + { + "bbox": [ + 110, + 76, + 211, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 76, + 211, + 166 + ], + "type": "image", + "image_path": "4c457bf8ecc0ab0daa868ea993daecb6172db218673b9689778445fa6b15ba2c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 215, + 76, + 306, + 166 + ], + "blocks": [ + { + "bbox": [ + 215, + 76, + 306, + 166 + ], + "lines": [ + { + "bbox": [ + 215, + 76, + 306, + 166 + ], + "spans": [ + { + "bbox": [ + 215, + 76, + 306, + 166 + ], + "type": "image", + "image_path": "fb955c1df24e9112822420882dbfd46afea779896098cba24e5b220c768b3eac.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 76, + 403, + 166 + ], + "blocks": [ + { + "bbox": [ + 310, + 76, + 403, + 166 + ], + "lines": [ + { + "bbox": [ + 310, + 76, + 403, + 166 + ], + "spans": [ + { + "bbox": [ + 310, + 76, + 403, + 166 + ], + "type": "image", + "image_path": "74db19474066235320891f11a947c6133e419da84821eecd7a53417ba79ee176.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 406, + 76, + 499, + 166 + ], + "blocks": [ + { + "bbox": [ + 406, + 76, + 499, + 166 + ], + "lines": [ + { + "bbox": [ + 406, + 76, + 499, + 166 + ], + "spans": [ + { + "bbox": [ + 406, + 
76, + 499, + 166 + ], + "type": "image", + "image_path": "5e5f6ebd7ada288edd9b5f729682ee4729833b0cb5006c1f67e04520365cd919.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 111, + 171, + 211, + 261 + ], + "blocks": [ + { + "bbox": [ + 111, + 171, + 211, + 261 + ], + "lines": [ + { + "bbox": [ + 111, + 171, + 211, + 261 + ], + "spans": [ + { + "bbox": [ + 111, + 171, + 211, + 261 + ], + "type": "image", + "image_path": "ac7fd38db459c2e2517754cba41a8e08d3c9cf19bf27aa7a1ff5269befcb79a3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 215, + 172, + 306, + 261 + ], + "blocks": [ + { + "bbox": [ + 215, + 172, + 306, + 261 + ], + "lines": [ + { + "bbox": [ + 215, + 172, + 306, + 261 + ], + "spans": [ + { + "bbox": [ + 215, + 172, + 306, + 261 + ], + "type": "image", + "image_path": "a9c3bdc1ccd219d5d46e0c496746e9f91aeebbcd86a9846587428409124b6e42.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 172, + 402, + 261 + ], + "blocks": [ + { + "bbox": [ + 310, + 172, + 402, + 261 + ], + "lines": [ + { + "bbox": [ + 310, + 172, + 402, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 172, + 402, + 261 + ], + "type": "image", + "image_path": "a99f19209d3bde08b3fb2bba24b4c3b3c12a36b8ddbc73dce7c33808a90a5be5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 406, + 172, + 498, + 261 + ], + "blocks": [ + { + "bbox": [ + 406, + 172, + 498, + 261 + ], + "lines": [ + { + "bbox": [ + 406, + 172, + 498, + 261 + ], + "spans": [ + { + "bbox": [ + 406, + 172, + 498, + 261 + ], + "type": "image", + "image_path": "a294ad701203332c43e130b55e4a3017cfe651e413ef4b236c2112092d14e26f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 111, + 267, + 211, + 356 + ], + "blocks": [ + { + "bbox": [ + 111, + 267, + 211, + 356 + ], + "lines": [ + { + "bbox": [ + 111, + 267, + 211, + 356 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 211, + 356 + ], + "type": "image", + "image_path": "11f87cbff7e7f08a9001c94408c8ee70a0f8f29c2ff7c14281721040043a07c5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 215, + 267, + 306, + 356 + ], + "blocks": [ + { + "bbox": [ + 215, + 267, + 306, + 356 + ], + "lines": [ + { + "bbox": [ + 215, + 267, + 306, + 356 + ], + "spans": [ + { + "bbox": [ + 215, + 267, + 306, + 356 + ], + "type": "image", + "image_path": "273c50f3acd90ba19b5a065e5955c09ee0f714dc7f1e3cfc5b137b8e820a6380.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 267, + 403, + 356 + ], + "blocks": [ + { + "bbox": [ + 310, + 267, + 403, + 356 + ], + "lines": [ + { + "bbox": [ + 310, + 267, + 403, + 356 + ], + "spans": [ + { + "bbox": [ + 310, + 267, + 403, + 356 + ], + "type": "image", + "image_path": "c7494f8c14fe238b6ecea4422a302a3709fa4f8661f97f8802587ed18b99d19d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 404, + 488, + 411 + ], + "lines": [ + { + "bbox": [ + 430, + 404, + 488, + 411 + ], + "spans": [ + { + "bbox": [ + 430, + 404, + 488, + 411 + ], + "type": "text", + "content": "Power Law Fit" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 267, + 498, + 356 + ], + "blocks": [ + { + "bbox": [ + 406, + 267, + 498, + 356 + ], + "lines": [ + { + "bbox": [ + 406, + 267, + 498, + 356 + ], + "spans": [ + { + "bbox": [ + 406, + 267, + 498, + 356 + ], + "type": "image", + "image_path": 
"8e48e6d52f6bcbb64acd39bed728f3b5432cf2832b70f126f223cba10bc1f95c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 111, + 363, + 211, + 460 + ], + "blocks": [ + { + "bbox": [ + 111, + 363, + 211, + 460 + ], + "lines": [ + { + "bbox": [ + 111, + 363, + 211, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 363, + 211, + 460 + ], + "type": "image", + "image_path": "4ba34e7af89ce61daee2cab5adc88f0f87203cafb8df2cdb93055231529325fe.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 474, + 504, + 525 + ], + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 525 + ], + "type": "text", + "content": "Figure 11: Synthetic Scaling Plots. Relationship between Average Error and training compute (in floating-point operations) for various 1B, 3B, 8B PLM with L14 vision encoder. Each plot reports the individual error in VideoMME [75], STAR [72], EgoSchema [90], How2QA [8], MVBench [70], PerceptionTest [71], ChartQA [109], DocVQA [53], InfoVQA [56], OCRBench [57], RealworldQA [45], OKVQA [110], VQAv2 [111], VizWiz [40], and TextVQA [112]. Finally, we report Avg. All, which average over all the metrics." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 215, + 363, + 306, + 460 + ], + "blocks": [ + { + "bbox": [ + 215, + 363, + 306, + 460 + ], + "lines": [ + { + "bbox": [ + 215, + 363, + 306, + 460 + ], + "spans": [ + { + "bbox": [ + 215, + 363, + 306, + 460 + ], + "type": "image", + "image_path": "ea8890b185233495dcde92782d8a3178b0051f492f0303f6c51666a81220f5fb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 310, + 363, + 402, + 460 + ], + "blocks": [ + { + "bbox": [ + 310, + 363, + 402, + 460 + ], + "lines": [ + { + "bbox": [ + 310, + 363, + 402, + 460 + ], + "spans": [ + { + "bbox": [ + 310, + 363, + 402, + 460 + ], + "type": "image", + "image_path": "806474d8360c64160660f815fe8d5cc8cc35ffc5e40e6f53ac1bfd5ae88e9da8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 526, + 258, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 258, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 258, + 538 + ], + "type": "text", + "content": "C VLM Benchmark Details" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 552, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 606 + ], + "type": "text", + "content": "In this section, we provide details about all the image and video benchmarks considered in §6 including composition and evaluation metrics for image benchmarks (§C.1), video benchmarks (§C.2) and our PLM-VideoBench (§C.3. We also describe evaluation protocol for all these benchmarks including inference parameters and prompts (§C.4). Pointers to evaluation code are linked where available." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 621, + 216, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 216, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 216, + 632 + ], + "type": "text", + "content": "C.1 Image Benchmarks" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 642, + 505, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 642, + 505, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 505, + 665 + ], + "type": "text", + "content": "Image captioning We evaluate on single image captioning and grounded image captioning benchmarks like COCO [49], nocaps [50] and Flickr [51]. We report CIDEr as the evaluation metric." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "Perception and reasoning We evaluate on broad, general purpose VQA benchmarks like MMMU [37], VQAv2 [111], MMBench [38], OK-VQA [39], VizWiz [40] as well as hard perception benchmarks like BLINK [44], CV-Bench [19], RealWorldQA [45], and VSR [127]. For all MCQ benchmarks, we report accuracy of selecting the correct option." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "Charts, diagrams and documents We evaluate on benchmarks for reasoning over various types of charts, graphs, diagrams, infographics etc. Specifically, DocVQA [53], ChartQA [54], TextVQA [52], InfographicsVQA [56], AI2D [55], OCRBench [57], and SEED [58]. We report accuracy of selecting the correct option." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "text", + "content": "Image Hallucination Finally, we evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as HallusionBench [67] and POPE [68]. For HallusionBench we report the " + }, + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "inline_equation", + "content": "aAcc" + }, + { + "bbox": [ + 104, + 129, + 506, + 163 + ], + "type": "text", + "content": " metric (code) which accounts for correctness and consistency using an LLM judge." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 175, + 214, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 214, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 214, + 186 + ], + "type": "text", + "content": "C.2 Video Benchmarks" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 251 + ], + "type": "text", + "content": "Video captioning We evaluate on short-video captioning benchmarks, namely YouCook2 [83] and VATEX [84] as well as recent detailed video captioning benchmarks — DREAM-1k [86] and AuroraCap-VDC [87]. For YouCook2 and VATEX, we report CIDEr score [208]. For DREAM-1k we report AutoDQ F1-score (code) and for AuroraCap-VDC we report the VDC accuracy (code) following the author's proposed metric." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 262, + 506, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 329 + ], + "type": "text", + "content": "Short video QA We evaluate on multiple-choice (MCQ) benchmarks such as How2QA [8], NNextQA [69], PerceptionTest [71], STAR [72], TGIF-QA [73], TVQA [74], Video-MME [75] and TVBench [80]. We report accuracy of selecting the correct option. We also evaluate on open-ended question answering benchmarks (w/o options) such as ActivityNet-QA [76] (code), MMBenchVideo [79] (code) and VCGBench-Diverse [22]. We report LLM-judge scores/accuracies for these benchmarks. For VCGBench-Diverse, we report the average of 5 LLM-judge scores (code)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 340, + 505, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 505, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 505, + 374 + ], + "type": "text", + "content": "Long video QA We evaluate on popular long-video benchmarks such as EgoSchema [90], LVBench [92], LongVideoBench [94] and MLVU [96]. We report accuracy of selecting the correct option." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 385, + 505, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 505, + 440 + ], + "type": "text", + "content": "Fine-grained video QA We evaluate on benchmarks for fine-grained spatial, temporal and detail reasoning in videos such as TemporalBench [99], TOMATO [100], MotionBench [101], TempCompass [102] and CG-Bench [97]. We report accuracy of selecting the correct option. For Temporal-Bench, we report the multi-binary accuracy (MBAcc) (code) proposed by the authors to reduce bias in evaluation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "text", + "content": "Hallucination We evaluate on benchmarks that evaluate robustness of models to hallucinated details in questions such as VideoHallucer [88] and EventHallusion [89]. We report accuracy of selecting the correct option." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 498, + 211, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 211, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 211, + 510 + ], + "type": "text", + "content": "C.3 PLM-VideoBench" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 519, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 540 + ], + "type": "text", + "content": "We evaluate on our suite of benchmarks for fine-grained and spatio-temporal reasoning in videos. These include:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 553, + 505, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 505, + 609 + ], + "type": "text", + "content": "Fine-grained QA (FGQA) We report multi-binary accuracy (MBAcc) following prior work [99]. In short, this entails presenting the model multiple independent, binary-choice questions about the same video (in our case, three questions) and requiring the model to gets all of them correct, to count towards accuracy. This sets a higher bar for models, and combats bias in multiple-choice question benchmarks that prior work identifies." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "text", + "content": "SmartGlasses-QA (SGQA) We report LLM-judge accuracy of the predicted answer compared to the ground truth answer. We follow existing LLM judge prompts from ActivityNetQA (code). The prompt is repeated below for completeness." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 665, + 504, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 689 + ], + "type": "text", + "content": "Video Region Captioning (PLM-RCap) We use an LLM-judge to generate the similarity scores between predicted and ground truth captions. The prompt is below." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "**Dense Video Region Captioning (PLM-RDCap)** We adapt the SODA metric [126] from dense video captioning literature for this task. To compute this metric, we use the same LLM-judge from" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "above to generate the pairwise similarity scores between predicted and ground truth captions, which is then fed to the standard metric computation routine." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 133 + ], + "type": "text", + "content": "Region Temporal Localization (PLM-RTLoc) We report standard temporal localization metrics, namely Mean Recall@1, averaged over a range of IoU thresholds [0.3, 0.5, 0.7, 0.9]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 148, + 222, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 222, + 160 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 222, + 160 + ], + "type": "text", + "content": "C.4 Evaluation Protocols" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 227 + ], + "type": "text", + "content": "Common evaluation protocol. For video benchmark evaluations, we sample 32 frames uniformly from the full video unless otherwise specified. For uniformity and consistency across benchmarks, we implement all LLM-judge evaluations using LLama3.3-70B-Instruct [13], following LLM judge prompts from popular evaluation frameworks [209, 210] where available. Outputs from all models are generated via greedy sampling (temperature 0)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 236, + 195, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 236, + 195, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 236, + 195, + 247 + ], + "type": "text", + "content": "SG-QA judge prompt" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 250, + 499, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 250, + 499, + 290 + ], + "spans": [ + { + "bbox": [ + 110, + 250, + 499, + 290 + ], + "type": "text", + "content": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 300, + 179, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 300, + 179, + 308 + ], + "spans": [ + { + "bbox": [ + 111, + 300, + 179, + 308 + ], + "type": "text", + "content": "##INSTRUCTIONS:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 109, + 310, + 487, + 340 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 109, + 310, + 487, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 310, + 487, + 320 + ], + "spans": [ + { + "bbox": [ + 109, + 310, + 487, + 320 + ], + "type": "text", + "content": "- Focus on the meaningful match between the predicted answer and the correct answer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 109, + 320, + 343, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 320, + 343, + 330 + ], + "spans": [ + { + "bbox": [ + 109, + 320, + 343, + 330 + ], + "type": "text", + "content": "- Consider synonyms or paraphrases as valid matches." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 109, + 330, + 414, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 330, + 414, + 340 + ], + "spans": [ + { + "bbox": [ + 109, + 330, + 414, + 340 + ], + "type": "text", + "content": "- Evaluate the correctness of the prediction compared to the answer." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 110, + 350, + 395, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 350, + 395, + 360 + ], + "spans": [ + { + "bbox": [ + 110, + 350, + 395, + 360 + ], + "type": "text", + "content": "Please evaluate the following video-based question-answer pair:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 360, + 204, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 360, + 204, + 369 + ], + "spans": [ + { + "bbox": [ + 111, + 360, + 204, + 369 + ], + "type": "text", + "content": "Question: [question]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 370, + 221, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 370, + 221, + 380 + ], + "spans": [ + { + "bbox": [ + 111, + 370, + 221, + 380 + ], + "type": "text", + "content": "Correct Answer: [target]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 380, + 244, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 380, + 244, + 389 + ], + "spans": [ + { + "bbox": [ + 111, + 380, + 244, + 389 + ], + "type": "text", + "content": "Predicted Answer: [candidate]" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 389, + 499, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 389, + 499, + 460 + ], + "spans": [ + { + "bbox": [ + 110, + 389, + 499, + 460 + ], + "type": "text", + "content": "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest 
meaningful match. Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. For example, your response should look like this: {\"pred\": \"yes\", \"score\": 4.8}." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 472, + 208, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 472, + 208, + 483 + ], + "spans": [ + { + "bbox": [ + 111, + 472, + 208, + 483 + ], + "type": "text", + "content": "PLM-RCap judge prompt" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 110, + 485, + 499, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 485, + 499, + 545 + ], + "spans": [ + { + "bbox": [ + 110, + 485, + 499, + 545 + ], + "type": "text", + "content": "Your task is to compare a given pair of captions and provide a single score indicating how correct the pred is compared to GT, on a scale from 0 to 10. Focus on meaning and context, not exact word matches. Penalize missing and incorrect information, with lower scores for more significant errors. High scores require accurate conveyance of all key GT information. Respond with only the score, starting your response with the number and including no additional text. Output format: [score]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 619 + ], + "type": "text", + "content": "PLM-VideoBench inference prompts. Table 10 contains example inference prompt examples for each PLM-VideoBench task. Note that some variation exists between instances in the benchmark. 
For example, for RCap a prompt may be \"What is happening to the subject in the region highlighted by the red rectangle ...\" instead of \"Give a detailed description of the events occurring in the region marked by the red rectangle ...\" however they convey the same underlying instruction and information." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 506, + 723 + ], + "type": "text", + "content": "Proprietary models like GPT-4o and Gemini require more careful prompting to ensure that the output formatting is respected. For example, we append instructions to prevent model hallucinations (e.g., \"You must use these frames to answer the question; do not rely on any external knowledge or commonsense\"), to prevent refusals to answer (e.g., \"Even if the information in these separate frames is not enough to answer the question, please try your best to guess an answer which you think would be the most possible one based on the question. Do not generate answers such as not possible to determine\") and in-context examples to help guide the model towards the correct output format. Model- and benchmark-specific inference prompts will be released along with our code for full reproducibility." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 70, + 504, + 259 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 504, + 259 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 504, + 259 + ], + "type": "table", + "html": "
TaskPrompt
FGQAQuestion: [question] \\n Options: \\n (A) [option1] \\n (B) [option2] \\n Only give the best option.
SGQAThe following question is asked by the camera wearer at the end of the video. Provide a detailed answer even if unsure. Try to answer in around 20-30 words. Now answer the following question based on the video content: [question]
RDCapCreate a dense caption of the subject's actions within the red rectangles, including action frames ids and brief descriptions. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video.
RCapGive a detailed description of the events occurring in the region marked by the red rectangle within frames ([start frame], [end frame]) in this 32 frame video
RTLocGiven the region marked by the red rectangle in the video, please provide the start and end frame of when '[event]' happens. Use the format (start, end), where start and end are frame numbers between 0 and 31 in this 32 frame video.
", + "image_path": "6462db27df34dca5202d51ff30b4e6d53ccde5c5c7a6e1bbdb6382312ad5451a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 308, + 318, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 318, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 318, + 321 + ], + "type": "text", + "content": "D Additional PLM-VideoBench Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "type": "text", + "content": "We present benchmarking results across all model scales (1B, 3B, 8B) in Table 11, to supplement the 8B model results in the main paper (Table 5). Our approach consistently outperforms baselines across all scales, including proprietary models whose model scale is unknown." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 211, + 379, + 399, + 593 + ], + "blocks": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "lines": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "type": "text", + "content": "Table 10: PLM-VideoBench task prompts. Items in square brackets are placeholders filled in for each benchmark instance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 211, + 379, + 399, + 593 + ], + "lines": [ + { + "bbox": [ + 211, + 379, + 399, + 593 + ], + "spans": [ + { + "bbox": [ + 211, + 379, + 399, + 593 + ], + "type": "table", + "html": "
ModelFGOAMBaccSGQAAcc†RDCAPSOA‡RCapscore†RTLocmeanRAvg
Human perf.90.967.966.653.967.870.9
Proprietary
GPT-4o [33]61.263.720.935.733.151.6
Gemini 1.5 Pro [35]57.149.914.433.127.644.0
Gemini 2.0 Flash [35]58.744.813.230.927.642.5
1B scale
Qwen2VL-2B [30]39.038.50.918.110.829.1
InternVL2-1B [10]35.828.90.317.22.723.8
InternVL2.5-1B [10]42.339.66.723.61.630.8
PLM-1B57.640.950.340.957.749.4
3B scale
Qwen2.5 VL-3B [106]43.745.10.317.213.933.1
InternVL2-4B [10]43.241.70.519.99.630.3
InternVL2.5-4B [10]50.049.24.925.915.435.3
PLM-3B67.138.853.145.058.253.0
8B scale
LLaVA-OV-7B [28]40.241.54.724.413.932.0
Qwen2VL-7B [30]49.244.54.117.615.135.3
Qwen2.5VL-7B [106]49.843.02.521.510.734.8
InternVL2-8B [10]47.745.91.221.511.635.0
InternVL2.5-8B [10]53.748.35.726.18.838.5
PLM-8B67.746.252.846.659.155.6
", + "image_path": "1828408ddca6d94bb42085ebace965210f63ef6d5aaefc0c5f8fd1a9b62002e5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 594, + 492, + 605 + ], + "lines": [ + { + "bbox": [ + 116, + 594, + 492, + 605 + ], + "spans": [ + { + "bbox": [ + 116, + 594, + 492, + 605 + ], + "type": "text", + "content": "Table 11: PLM-VideoBench results across all model scales to supplement results in Table 5." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 627, + 293, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 293, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 293, + 641 + ], + "type": "text", + "content": "E Baseline Implementation Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 653, + 477, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 653, + 477, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 653, + 477, + 665 + ], + "type": "text", + "content": "We provide baseline-specific implementation details for all models in §6.1 of the main paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Proprietary baselines We evaluate the GPT and Gemini family of models. For GPT-4o, we use the GPT-4o-2024-11-20 checkpoint. We feed 32 uniformly sampled frames regardless of video length, loaded at high image quality setting. For Gemini, we evaluate Gemini-1.5-Pro and Gemini-2.0-Flash. For VQA tasks, we input the video (without audio) which is processed internally at 1 fps. 
For" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "spatio-temporal tasks (RCap, RDCap, and RTLoc) we use the same inputs as for open-source models and GPT-4o. We evaluate these models using API call." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "content": "Open-source models We evaluate InternVL, Qwen, Molmo and Llava-OV models. We follow official implementation and preprocessing pipelines for each. Specifically, we evaluate InternVL2 and InternVL2.5 (code); QwenVL2 and QwenVL2.5 (code); Molmo-O-0924 (code) and Llava-OV (code). For QwenVL, we sample frames at 1 fps from videos. For InternVL2, we use 12 tiles per image as this more closely matches the reported results." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 173, + 506, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 506, + 220 + ], + "type": "text", + "content": "Human performance baseline. In Table 5, we report human performance on PLM-VideoBench. For each task, we present annotators with the test sets and collect answers for each instance given the standard task prompt. 
Given the difficulty of RDCap, we reuse our data annotation pipeline in " + }, + { + "bbox": [ + 104, + 173, + 506, + 220 + ], + "type": "inline_equation", + "content": "\\S H" + }, + { + "bbox": [ + 104, + 173, + 506, + 220 + ], + "type": "text", + "content": " to collect new dense captions independently, rather than providing the standard task instruction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 232, + 223, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 223, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 223, + 245 + ], + "type": "text", + "content": "F Additional Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 257, + 258, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 257, + 258, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 258, + 270 + ], + "type": "text", + "content": "F.1 Comparison with LLaMA-3V" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 179, + 282, + 427, + 361 + ], + "blocks": [ + { + "bbox": [ + 179, + 282, + 427, + 361 + ], + "lines": [ + { + "bbox": [ + 179, + 282, + 427, + 361 + ], + "spans": [ + { + "bbox": [ + 179, + 282, + 427, + 361 + ], + "type": "table", + "html": "
ModelAvg.DocVQA (test) acc [53]CharQA (test) acc [54]TextVQA (test) acc [52]InfoQA (test) acc [56]AL2D (two mask) acc [55]MMMU (val) acc [37]VQAV2 (val) acc [111]
LLaMA 3.2V (11B) [13]73.088.483.479.763.691.150.775.2
LLaMA 3.2V (90B) [13]76.690.185.582.367.292.360.378.1
PLM (1B)67.190.778.682.163.084.934.881.7
PLM (3B)74.493.884.384.374.690.941.284.3
PLM (8B)76.294.686.586.580.992.746.185.6
", + "image_path": "828b315a69fb2c08cbb43552a082a1d2df5550d03f5affc2807edeba28365435.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 411, + 208, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 208, + 424 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 208, + 424 + ], + "type": "text", + "content": "F.2 Image Captioning" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 246, + 436, + 369, + 619 + ], + "blocks": [ + { + "bbox": [ + 104, + 362, + 504, + 385 + ], + "lines": [ + { + "bbox": [ + 104, + 362, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 362, + 504, + 385 + ], + "type": "text", + "content": "Table 12: PLM versus LLaMA-3V on Image Benchmarks: Note that we use LLaMA-3V-90B [13] for generating image captions in our synthetic data engine." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 246, + 436, + 369, + 619 + ], + "lines": [ + { + "bbox": [ + 246, + 436, + 369, + 619 + ], + "spans": [ + { + "bbox": [ + 246, + 436, + 369, + 619 + ], + "type": "table", + "html": "
ModelCOCO (karnathy) CIDEr [49]Nocap CIDEr [50]Flickr CIDEr [51]
Proprietary
GPT-4o [33]74.476.671.7
Gemini 1.5 Pro [35]70.671.168.2
Gemini 2.0 Flash [35]84.885.066.6
1B scale
Qwen2VL-2B [30]107.1101.286.0
InternVL2.5-1B [10]122.6110.586.1
PLM-1B138.6124.2100.5
3B scale
Qwen2.5 VL-3B [106]101.7105.577.5
InternVL2.5-4B [10]125.4117.187.4
PLM-3B144.9126.598.0
8B scale
LLaVA-OV-7B [28]112.170.755.7
Qwen2.5VL-7B [106]36.832.734.9
InternVL2.5-8B [10]125.8116.796.5
PLM-8B146.7129.9105.6
", + "image_path": "9709cd79ff83abbab0f0a275a7f68a66ecc1857a6d9ed49ff9b58854b7d5d41e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 624, + 506, + 647 + ], + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 647 + ], + "type": "text", + "content": "Table 13: Image Captioning benchmarks. PLM versus proprietary models and open-access baselines of comparable scale on Image Captioning benchmarks." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 208, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 208, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 208, + 85 + ], + "type": "text", + "content": "F.3 Image Grounding" + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 181, + 99, + 428, + 263 + ], + "blocks": [ + { + "bbox": [ + 181, + 99, + 428, + 263 + ], + "lines": [ + { + "bbox": [ + 181, + 99, + 428, + 263 + ], + "spans": [ + { + "bbox": [ + 181, + 99, + 428, + 263 + ], + "type": "table", + "html": "
ModelRefCOCOvalRefCOCO testARefCOCO testBRefCOCO+ valRefCOCO+ testARefCOCO+ testBRefCOCOg valRefCOCOg testAvg.
Specialists
GroundingDINO [211]90.693.288.288.289.075.986.187.086.6
UNINEXT-H [212]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [213]90.693.288.288.289.075.986.187.086.6
1B scale
PLM-1B88.591.584.883.288.676.586.086.485.7
3B scale
Qwen2.5 VL-3B [106]89.191.784.082.488.074.185.285.785.0
PLM-3B93.394.989.589.893.684.290.890.990.9
8B scale
Cube-LLM [214]90.992.687.983.989.277.486.687.287.0
Qwen2VL-7B [30]91.793.687.385.890.579.587.387.887.9
Qwen2.5VL-7B [106]89.191.784.082.488.074.185.285.785.0
InternVL2-8B [10]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [10]90.394.585.985.291.578.886.787.687.6
PLM-8B90.691.885.987.391.381.188.889.288.2
", + "image_path": "f35c016082e347045eb81156cdaf9761cb71b628a67cb8a1fc750df128649a34.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 315, + 247, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 247, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 247, + 328 + ], + "type": "text", + "content": "F.4 Long Video Understanding" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 243, + 341, + 370, + 571 + ], + "blocks": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "content": "Table 14: Image Grounding results on RefCOCO+/g. PLM performs competitively compared to the baselines across all model scales, and outperforms specialist models for the image grounding task." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 243, + 341, + 370, + 571 + ], + "lines": [ + { + "bbox": [ + 243, + 341, + 370, + 571 + ], + "spans": [ + { + "bbox": [ + 243, + 341, + 370, + 571 + ], + "type": "table", + "html": "
ModelLong Video QA
LVBench acc [92]Long VideoBench (val) acc [94]MLVU (dev) Marq [96]
Proprietary
GPT-4o [33]37.266.7*67.4
Gemini 1.5 Pro [35]33.1*64.0*69.9
Gemini 2.0 Flash [35]-61.6*69.5
1B scale
Qwen2VL-2B [30]42.047.962.7
InternVL2-1B [10]31.443.3*52.0
InternVL2.5-1B [10]35.347.957.3*
PLM-1B40.052.358.9
3B scale
Qwen2.5 VL-3B [106]43.3*54.2*68.2
InternVL2-4B [10]34.053.0*59.9*
InternVL2.5-4B [10]40.156.368.3*
PLM-3B40.457.965.0
8B scale
LLaVA-OV-7B [28]38.855.764.6
Qwen2VL-7B [30]46.055.869.8*
Qwen2.5VL-7B [106]45.3*56.0*70.2*
InternVL2-8B [10]37.055.464.0*
InternVL2.5-8B [10]43.2*60.0*68.9
PLM-8B44.556.966.4
", + "image_path": "f5228af189058077b100ec6c0dc4d98c7c57510f2dd8d1516560b8cd3d8e1e1d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 575, + 506, + 609 + ], + "lines": [ + { + "bbox": [ + 104, + 575, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 506, + 609 + ], + "type": "text", + "content": "Table 15: Results on long video understanding tasks. We compare PLM with open-access baselines and proprietary models of comparable scale, and report results over 3 long video QA benchmarks. Cells with * are reported numbers from literature. The remaining are reproduced using official code." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 290, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 290, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 290, + 85 + ], + "type": "text", + "content": "G PLM-FGQA: Fine-grained QA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "type": "text", + "content": "We present PLM-FGQA Fine-grained QA (FGQA), a video dataset focused on \"how\" actions are performed, capturing nuanced fine-grained details through specially designed questions and carefully annotated answers. 
Due to the scarcity of fine-grained video Q&A data, see Table 16, we built a data engine to enable the collection of our 2.4M Q&A dataset, PLM-FGQA." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 166, + 152, + 442, + 258 + ], + "blocks": [ + { + "bbox": [ + 166, + 152, + 442, + 258 + ], + "lines": [ + { + "bbox": [ + 166, + 152, + 442, + 258 + ], + "spans": [ + { + "bbox": [ + 166, + 152, + 442, + 258 + ], + "type": "table", + "html": "
DatasetYear#Q&AsDatasetYear#Q&As
MovieQA20166462STAR202160000
MSRVTT-QA2017243690CLEVRER202382620
TGIF-QA2017165165EgoQA202419000
MSVD-QA201751000PerceptionTest202444146
TVQA2018152545VideoInstruct202425803
ActivityNetQA201958000MoVQA202421953
How2QA202044007CinePile2024303828
Next-QA202152044Sports-QA202594000
PLM-FGQA20252379067
", + "image_path": "efa02aa4bae3e91c46df6512d791277416bd6ded15e1faf03355c41f6db2cee2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 136, + 262, + 472, + 274 + ], + "lines": [ + { + "bbox": [ + 136, + 262, + 472, + 274 + ], + "spans": [ + { + "bbox": [ + 136, + 262, + 472, + 274 + ], + "type": "text", + "content": "Table 16: Comparison of our PLM-FGQA dataset with existing video-QA datasets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 304, + 275, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 275, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 275, + 316 + ], + "type": "text", + "content": "G.1 Annotation process: Data Engine" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 325, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 504, + 380 + ], + "type": "text", + "content": "Our data engine is built upon the following modules: (1) Temporal Segment Generation, (2) Question Generation, (3) Answer Generation, (4) Human Annotation (answer verification/manual answer annotation), (5) Quality Control, as illustrated in Figure 12. Next, we describe each module in detail, and finally also provide additional details about the extra steps we took for forming the FG-QA component of PLM-VideoBench out of these annotations." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 106, + 392, + 504, + 434 + ], + "blocks": [ + { + "bbox": [ + 106, + 392, + 504, + 434 + ], + "lines": [ + { + "bbox": [ + 106, + 392, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 504, + 434 + ], + "type": "image", + "image_path": "74e60f80c4d1caa05d26073b5154c8482957d25d0a6ca771bb72b8df194ab130.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 437, + 434, + 450 + ], + "lines": [ + { + "bbox": [ + 175, + 437, + 434, + 450 + ], + "spans": [ + { + "bbox": [ + 175, + 437, + 434, + 450 + ], + "type": "text", + "content": "Figure 12: Data engine used to collect the PLM-FGQA dataset." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 473, + 272, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 473, + 272, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 272, + 485 + ], + "type": "text", + "content": "G.1.1 Temporal Segment Generation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 492, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 525 + ], + "type": "text", + "content": "We source the video data that serves as a basis for our annotations from publicly available datasets. Based on the video sources and the type of existing annotations, we split the videos into three distinct categories." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 530, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 504, + 586 + ], + "type": "text", + "content": "Videos with existing ground-truth segment annotations: We directly adopt segments with their human-annotated action annotations from the following datasets: Ego4d Goal-Step[215], Ego4D Moments[115], EgoExo4D [116], HT-Step[216, 217], COIN [117], CrossTask [118], and YouCook2 [83]. All those sources provide video segment boundaries accompanied by some form of textual action descriptions, and are therefore readily usable with the rest of the pipeline." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 590, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 647 + ], + "type": "text", + "content": "Unedited videos of physical activities: For physical activities videos (e.g. basketball, dancing, soccer), actions are usually atomic and short (e.g. dribble, dance move, kick) and therefore rerequire precise temporal localization. To source videos for these scenarios we used data from EgoExo4D [116] that contains temporally well-aligned and precise narrations; we obtained segments of 2-3 seconds centered around narration timings, and used the anchor narrations directly as the action description." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 651, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 504, + 706 + ], + "type": "text", + "content": "Raw, untrimmed videos in-the-wild without temporal segment annotations. We source a very large part of our data from untrimmed instructional videos in the large-scale HT100M dataset [114] which we first need to segment before use. 
The goal is to obtain video clips that contain meaningful, salient actions, and also caption the resulting segments with concise but accurate action descriptions. We describe the automatic segmentation and captioning module in the following." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 711, + 459, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 711, + 459, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 459, + 723 + ], + "type": "text", + "content": "The automatic segmentation and captioning pipeline involves the following three stages:" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 315, + 188 + ], + "blocks": [ + { + "bbox": [ + 107, + 71, + 315, + 188 + ], + "lines": [ + { + "bbox": [ + 107, + 71, + 315, + 188 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 315, + 188 + ], + "type": "image", + "image_path": "9c9906e83a225a513caff2dfdd4843d2b7b4a39adfecf3ff65b862754c2765b7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "type": "text", + "content": "Figure 13: Distribution of question types (left) and video sources (right) in the FGQA component of PLM-VideoBench." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 323, + 72, + 500, + 187 + ], + "blocks": [ + { + "bbox": [ + 323, + 72, + 500, + 187 + ], + "lines": [ + { + "bbox": [ + 323, + 72, + 500, + 187 + ], + "spans": [ + { + "bbox": [ + 323, + 72, + 500, + 187 + ], + "type": "image", + "image_path": "f8993a3092ffb9f6800aa080896e31690641b37cada9f9291ff4057e8ca301a8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 247, + 506, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 378 + ], + "type": "text", + "content": "Temporal segment proposal. Given untrimmed long videos, the first step is to identify semantically coherent segments within them. Inspired by prior work on unsupervised action proposal and segmentation, we leverage visual feature clustering to generate temporal segment proposals, and use shot-boundary detection results to further refine the segment boundaries. We extract clip-level visual features[218] using a sliding window with temporal stride of 1 second. We then compute the pairwise similarity between neighborhood features and detect the class-agnostic action boundaries using a boundary detection kernel (similar to those used in literature[219, 220]). Finally, since the detected segments are usually over-segmented, we perform a bottom-up agglomerate clustering approach to group adjacent segments into clusters, using a segment duration prior of 10 seconds. 
We also leverage shot boundary detection[221] to obtain precise moments of scene changes: we refine the boundaries of the segment proposals by aligning them to the detected shot boundaries when they're sufficiently close (" + }, + { + "bbox": [ + 104, + 247, + 506, + 378 + ], + "type": "inline_equation", + "content": "\\leq 1" + }, + { + "bbox": [ + 104, + 247, + 506, + 378 + ], + "type": "text", + "content": " second)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 383, + 504, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 504, + 438 + ], + "type": "text", + "content": "Segment filtering and ranking. How-to videos often include a lot of content that is irrelevant to the demonstration of the activity at hand, such as the instructor explaining what they are about to do or showcasing tools and ingredients. It is therefore important to detect and filter segments with such uninformative content. To that end we rank candidate segments according to relevance using a series of heuristics and learned models, described below." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 443, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 104, + 443, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 504, + 487 + ], + "type": "text", + "content": "a. Talking head detection. A common mode in instructional videos is instructors talking into the camera, describing objects or explaining actions they're about to take. To detect and remove such segments, we employ an Active Speaker Detection (ASD) pipeline[222], which we run densely on every video and combine resulting talking head tracks, to produce an ASD score for every segment." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "content": "b. Hand-object interaction (HOI) detection. The presence of hand-object interaction (HOI) can be a good indicator of visually groundable actions. We leverage the temporal selection strategy[223] to filter out the segment proposals that contain hand-object interaction. We first employ an off-the-shelf robust HOI detector[224] to densely extract HOI regions within a proposed segment. The HOI score is then calculated by measuring the likelihood of hand-object interaction in the segment and the averaged probability of all the detected hands." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 563, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 662 + ], + "type": "text", + "content": "c. ASR groundability. HT100M videos contain timestamped ASR captions, which are speech transcriptions of the audio instructions. It is desirable to rank candidate segments based on how likely their ASR content is to their video content. The hypothesis here is that segments containing ASR transcriptions that align well to the video content, are more likely to be visual-information rich. Moreover since the action labeling pipeline (described next) relies on ASR metadata for producing descriptions, higher ASR groundability scores make it likelier to produce good quality segment descriptions. For every candidate segment, we compute an ASR-groundability score by computing video-text alignment scores[218] for each ASR caption within the segment and then averaging the ones that are above a threshold (we use 0.5)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": "d. Relevance classification. The above heuristics work well for the clear-cut cases they are tailored for, but in practice we found that they struggle with more nuanced segments (e.g. instructor fiddling with an object and describing it rather than using it). To improve the detection of those cases, we manually labelled a small amount of segments that passed through the other filters and trained a binary classifier to classify them as \"relevant\" or \"irrelevant\"; to that end we trained a simple 2-layer MLP classifier" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "on top of temporally pooled video representations with a logistic loss for binary classification. We deployed the trained model to provide a relevance score for all the candidate segments." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 100, + 505, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 505, + 133 + ], + "type": "text", + "content": "We combined the scores resulting from all the modules described above and determined cutoff thresholds, based on a small manually annotated validation set. In production, we keep all the segments that have relevance scores above those thresholds." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 138, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 138, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 138, + 506, + 205 + ], + "type": "text", + "content": "Segment captioning We follow a two-step process to obtain action labels for each unlabeled segment: In the first step, a collection of off-the-shelf perception models are used to extract individual image-level captions, video-level captions, and object detections from the segment. The output of all perception models is then fed as text into an LLM to generate long, fine-grained captions. At the second step, the detailed captions are fused with the ASR content of the segment, to obtain a consice action description. 
Specifically, we query an LLM (Llama 3.3 70B [13]) with the following prompt:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 214, + 253, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 214, + 253, + 224 + ], + "spans": [ + { + "bbox": [ + 111, + 214, + 253, + 224 + ], + "type": "text", + "content": "Segment to action labels prompt" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 227, + 499, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 227, + 499, + 277 + ], + "spans": [ + { + "bbox": [ + 110, + 227, + 499, + 277 + ], + "type": "text", + "content": "Detailed description: [fine grained caption] ASR transcription: [asr caption]. Given the detailed description above, identify the specific action performed as part of the activity [task name]. Your response must not be the same as the activity [task name] and needs to be a specific substep within the activity [task name]. Please also supply a rationale for your answer." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 287, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 504, + 309 + ], + "type": "text", + "content": "The extracted labeled video segments obtained through the above process serve as the foundation for the subsequent Q&A generation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "type": "text", + "content": "G.1.2 Automatic Question Generation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 343, + 504, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 504, + 399 + ], + "type": "text", + "content": "We automatically generate questions about the fine-grained details of the way activities are executed in the video. Our questions is generated with a variety of prompts and models which lead to increased question diversity and specificity. In Table 17 we present the question types and sample questions per question type. Here, we summarize how these questions are generated automatically with an ensemble with models and prompts:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 403, + 505, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 505, + 469 + ], + "type": "text", + "content": "LLM-based action-conditioned question generation Given a segment, its action name (e.g., cut potatoes), a task name (e.g., How to make sweet potato gratin) and optionally other metadata about the segment (for example, recognized objects [?]), we generate questions that can elicit descriptions of fine-grained details by raters with an LLM. We use tailored prompts for generating questions that cover how the activity is executed (tools, object locations, object states, direction of movements, hand pose), and the spatial arrangement of objects." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 479, + 283, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 479, + 283, + 490 + ], + "spans": [ + { + "bbox": [ + 110, + 479, + 283, + 490 + ], + "type": "text", + "content": "Activity FG question generation prompt" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 492, + 499, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 492, + 499, + 563 + ], + "spans": [ + { + "bbox": [ + 110, + 492, + 499, + 563 + ], + "type": "text", + "content": "I am learning how to [action name] while [task name]. Ask me [N] most relevant questions that reveal the details of the way the step is executed in my environment, e.g., (a) part location, (b) types of tools/ingredients used, (c) direction of movements, (d) how are objects held, (e) object states at the beginning of the step, (f) object state at the end of the step. The questions must be answerable by visually observing the activity, without reading instructions or trying out. Please indicate the type of question from (a) to (f) for each question asked at the beginning of the question." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 574, + 279, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 574, + 279, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 574, + 279, + 585 + ], + "type": "text", + "content": "Spatial FG question generation prompt" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 587, + 499, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 587, + 499, + 658 + ], + "spans": [ + { + "bbox": [ + 110, + 587, + 499, + 658 + ], + "type": "text", + "content": "Imagine I have no common sense or understanding of the 3D real world. I am trying to [task name] and am at the step where I am [action name]. There's [object list] when I'm [action name]. 
Ask me [N] questions about the 3D position of objects, relative location between objects, distance between objects, spatial relationship using prepositions like above, below, next to, etc. that I might want to know. The questions must be answerable by only visually observing me performing activity, without reading instructions or trying out." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "content": "We explicitly encourage the LLM to provide questions that can be answered solely based on the video frames, in contrast to questions that are focused on external knowledge or non-groundable concepts or judging the execution of the step (e.g., avoid questions like is the pan hot enough to add the oil?), what tool is typically used to loosen the axle nut). The rationale for this is to collect as many Q&A pairs that a model cannot answer just based on external knowledge/language prior, but they rather" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "require vision perception to be answered. Note that these questions are generated without visual input, hence they are not instance-specific and might not be answerable given the video segment." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 100, + 506, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 506, + 242 + ], + "type": "text", + "content": "VLM-based instance-specific question generation After collecting a first set of Q&As using the LLM-generated questions, we bootstrap a VLM Question Generator model, which takes as input the video segment, question types and optionally the task name, and generates a set of instance-specific visual questions. The VLM Question Generator model is obtained by supervised fine-tuning of PLM with a question generation instruction-tuning dataset which consists of triplets (video, prompt, response), where the prompt includes the instruction to generate questions based on question types and the response includes example questions to be generated for the given video. Due to the lack of such a dataset with fine-grained question, we synthetically generated it by utilizing the Q&A pairs obtained based on the LLM-generated questions. Specifically, for each video segment, we use an LLM to (1) decompose existing Q&A pairs into multiple Q&A pairs, with each new question focusing on one detail of the original answer; (2) tag question types for the generated questions based on an expanded list of question types; and (3) generate a (prompt, response) pair for the segment. This resulted in " + }, + { + "bbox": [ + 104, + 100, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\sim 600k" + }, + { + "bbox": [ + 104, + 100, + 506, + 242 + ], + "type": "text", + "content": " training instances." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 251, + 284, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 251, + 284, + 261 + ], + "spans": [ + { + "bbox": [ + 110, + 251, + 284, + 261 + ], + "type": "text", + "content": "VLM Question Generator training sample" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 263, + 499, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 263, + 499, + 304 + ], + "spans": [ + { + "bbox": [ + 110, + 263, + 499, + 304 + ], + "type": "text", + "content": "Generate 3 different questions that reveal the fine-grained details of the way the activity is executed. In particular, focus on these question types: fine-grained object locations, hand pose, object/repetition counts, generating at least one question per type. Write each question in a separate line, e.g., Q1. first question." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 305, + 203, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 305, + 203, + 314 + ], + "spans": [ + { + "bbox": [ + 111, + 305, + 203, + 314 + ], + "type": "text", + "content": "Q2. second question." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 321, + 192, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 321, + 192, + 333 + ], + "spans": [ + { + "bbox": [ + 111, + 321, + 192, + 333 + ], + "type": "text", + "content": "ON. N-th question." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 334, + 153, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 334, + 153, + 343 + ], + "spans": [ + { + "bbox": [ + 111, + 334, + 153, + 343 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 344, + 397, + 373 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 111, + 344, + 370, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 344, + 370, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 344, + 370, + 354 + ], + "type": "text", + "content": "Q1. Where are the tomatoes positioned prior to being cut?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 354, + 397, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 354, + 397, + 363 + ], + "spans": [ + { + "bbox": [ + 111, + 354, + 397, + 363 + ], + "type": "text", + "content": "Q2. How is the person grasping the tomato with their left hand?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 365, + 365, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 365, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 365, + 373 + ], + "type": "text", + "content": "Q3. How many tomatoes did the person use in the segment?" 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 382, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 382, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 382, + 504, + 427 + ], + "type": "text", + "content": "LLM-based follow-up question generation This final set of questions aims to increase coverage of video details and generate highly fine-grained questions by leveraging the already collected Q&A pairs for each segment and feed them to an LLM that generates \"follow-up\" questions that are more detailed and challenging than the initial questions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 435, + 275, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 435, + 275, + 445 + ], + "spans": [ + { + "bbox": [ + 110, + 435, + 275, + 445 + ], + "type": "text", + "content": "Follow-up question generation prompt" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 109, + 448, + 501, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 448, + 501, + 588 + ], + "spans": [ + { + "bbox": [ + 109, + 448, + 501, + 588 + ], + "type": "text", + "content": "I have the following information gathered about the video: [list of previous Q&A samples] Utilizing information and details from all the provided Q&A pairs (make sure to specialize questions based on the already corrected answers, e.g., using referring expressions), ask [N] most relevant and interesting, visual questions that we can ask annotators in order to reveal NEW, rich, additional fine-grained details about the video that we don't know yet, in particular about the following question types: 'tools/ingredients', 'object counts', 'repetition counts', 'direction of movement', 'hand pose', 'fine-grained object locations', 'spatial relations', 'initial state/end state', 'action happened before/after', 'clothes wearing', 'body pose', 'main action in the video', 
'temporal extent of action', 'sizes'. The questions should be specific and have a specific answer. Avoid generic questions that can be very tedious to answer, e.g., how many objects are there in the scene. Also, do not generate questions that start with \"Is ...\" and then list options. Prefer open-ended questions, e.g., starting with \"How\". [... More examples & formatting ...]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 604, + 271, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 271, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 271, + 615 + ], + "type": "text", + "content": "G.1.3 Automatic Answer Generation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "type": "text", + "content": "The next step of the data engine aims to produce correct and comprehensive answers to the generated questions. We obtain automatic answers to the generated questions using a version of PLM that has been fine-tuned with extra privileged information of various forms as input. The privileged information includes textual annotations from the metadata available with the candidate training videos and feature embeddings extracted from off-the-shelf models. Useful textual metadata include the video title, ASR captions or written descriptions, video-level task name (inferred by an LLM using the title and captions), and any existing QAs for that video. 
Off-the-shelf embeddings include frame-level features extracted denseley at 1 fps; we use an open-vocabulary object detection model, OWLv2 [225], for embedding object detection information and CLIP ViT-L14 embeddings [226]" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 113, + 69, + 497, + 435 + ], + "blocks": [ + { + "bbox": [ + 113, + 69, + 497, + 435 + ], + "lines": [ + { + "bbox": [ + 113, + 69, + 497, + 435 + ], + "spans": [ + { + "bbox": [ + 113, + 69, + 497, + 435 + ], + "type": "table", + "html": "
Question TypeSample Questions
Action RecognitionWhat is the process being performed on the sandpaper? \nWhat is the action shown?
Action SequenceWhat does the person do after brewing the tea? \nWhat does the person do before marking the vinyl with a pencil?
Counting ProblemsWhat is the quantity of universal down cleaner being poured into the task area? \nHow many branches does the person cut in total? \nHow many times does the person spray Greased Lightning onto the ketchup spill?
Movement DirectionIn what direction is the black welding tool pointing while the person is working on the metal joint? \nHow does the person chop the garlic with the knife?
Object AttributesWhat is the color of the seatpost shown in the video segment? \nWhat is the shape of the tube at the end of the step? \nWhat is the size of the knife being used to chop the spring onions?
Object LocationWhere does the person put the honey bottle away? \nWhere does the person position the clothes before ironing?
Object RecognitionWhat type of roller and paint are being used? \nWhat does the person place on top of the smooth half of the egg carton? \nWhat was the person initially holding in their left hand?
Object StateHow would you describe the sink at the beginning of the cleaning process? \nWhat is the state of the nematode after mixing it with water and sponge?
OtherAt what point in the video is the person seen holding the wires?
PoseHow are the woman's legs positioned while she is sitting? \nHow bent is the left elbow during the activity?
Spatial RelationsHow far is the bias tape maker from the right edge of the ironing board? \nWhat is the spatial relationship between the bowls and the Brussels sprouts on the kitchen countertop?
Speed/ForceHow would you describe the consistency of pressure applied during sanding? \nHow fast does the person initially push the stone?
", + "image_path": "48e62c91a4f9cb332ecf9e55e6ff53d1d47295a99bc55f91839966cf6ebf9686.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 182, + 440, + 427, + 452 + ], + "lines": [ + { + "bbox": [ + 182, + 440, + 427, + 452 + ], + "spans": [ + { + "bbox": [ + 182, + 440, + 427, + 452 + ], + "type": "text", + "content": "Table 17: PLM-FGQA question types and sample questions" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "type": "text", + "content": "for scene classification information. We incorporate the textual annotations directly into language prompts using the following template:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 504, + 265, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 504, + 265, + 514 + ], + "spans": [ + { + "bbox": [ + 110, + 504, + 265, + 514 + ], + "type": "text", + "content": "Automatic answer generation prompt" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 109, + 517, + 500, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 517, + 500, + 548 + ], + "spans": [ + { + "bbox": [ + 109, + 517, + 500, + 548 + ], + "type": "text", + "content": "A video is showing a task [video level task name], specifically the part where [ASR caption]. Here is what we already know about the video: [existing question-answer pairs]. 
Answer this question in detail: [question]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "content": "The off-the-shelf embeddings are incorporated into the PLM input via an additional Perceiver-IO[227] tokenizer, which summarizes the embeddings at the segment level." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 582, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 616 + ], + "type": "text", + "content": "We fine-tune the answer generator on 1M manually annotated QA pairs. After fine-tuning, we deploy the trained answer generator with privileged information access on the unlabelled questions produced in the previous step, to produce automatic answers." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 627, + 224, + 638 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 224, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 224, + 638 + ], + "type": "text", + "content": "G.1.4 Human Annotation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 647, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 669 + ], + "type": "text", + "content": "After obtaining segments and generating questions and automatic answers, we employ human annotators to obtain high-quality answers. 
Our answer annotations include the following:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 132, + 677, + 506, + 723 + ], + "type": "text", + "content": "- Human-verified answers: Raters are provided with the model-generated answer and are asked to accept or reject the answer. They can reject questions for being irrelevant or unanswerable, and answers for being factually incorrect or lacking details. Accepted question-answer pairs proceed without changes, while rejected ones are handled differently:" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 139, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 139, + 72, + 504, + 106 + ], + "type": "text", + "content": "question-related rejections (irrelevant or unanswerable) are discarded, whereas answer-related rejections (factually incorrect or lacking details) are marked for correction in the next phase. " + }, + { + "bbox": [ + 139, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "17.8\\%" + }, + { + "bbox": [ + 139, + 72, + 504, + 106 + ], + "type": "text", + "content": " of the total training samples are human-verified automatic answers." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "spans": [ + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "text", + "content": "- Human annotated answers: Raters answer the questions from scratch by ensuring to cover all the relevant details within the temporal segment. They receive reference information, such as video-level task names and ASR captions, and may use online resources like WikiHow for additional context. Questions that cannot be answered based on the video segment (for example, due to some false premise) are rejected (with an explanation). These manually annotated answers make up " + }, + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "inline_equation", + "content": "82.2\\%" + }, + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "text", + "content": " of the PLM-FGQA training split, and " + }, + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 132, + 111, + 504, + 188 + ], + "type": "text", + "content": " of the evaluation set." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "content": "Quality Control. Data quality is crucial for model success. We followed several strategies to monitor and enhance annotation quality: annotation Certification - we reviewed a small sample of annotations from each rater before they could work in production queues, ensuring that annotators met high-quality standards before advancing to production; golden Examples - annotators were provided with high-quality annotation examples, highlighting common error patterns and offering acceptable answers. 
targeted and Dual QA - we conducted daily audits, including vendor auditing and our own sampled quality control. In total, " + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "inline_equation", + "content": "13\\%" + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "content": " of the training set was audited, and " + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "content": " of the samples in PLM-VideoBench underwent quality control." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 300, + 301, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 301, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 301, + 313 + ], + "type": "text", + "content": "G.2 FGQA PLM-VideoBench Construction" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 209, + 329, + 399, + 462 + ], + "blocks": [ + { + "bbox": [ + 209, + 329, + 399, + 462 + ], + "lines": [ + { + "bbox": [ + 209, + 329, + 399, + 462 + ], + "spans": [ + { + "bbox": [ + 209, + 329, + 399, + 462 + ], + "type": "table", + "html": "
TrainTest
Sources stats
Total Videos767k3.6k
Unique Source Videos251k1.9
Average Duration (sec.)9.812.3
Annotations stats
Number of QA Pairs2.4M4.2k
Number Question Types1212
Question Length (avg/max)12/11412.3/56
Answer Length (avg/max)13.3/91114.1/62
Annotation TypeHumanHuman
Open-DomainYesYes
", + "image_path": "843cf4224cafa9d4d49787a9e773e2c299b0ce83ddfd981d9e5567f64e76f276.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 464, + 504, + 486 + ], + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 486 + ], + "type": "text", + "content": "Table 18: Statistics of the PLM-FGQA training and test data. The test split refers to the FGQA module of PLM-VideoBench." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 508, + 504, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 504, + 564 + ], + "type": "text", + "content": "The FG-QA component of PLM-VideoBench is formed from a held-out portion of PLM-FGQA. We refine this set and transform it into a challenging MCQ-based benchmark by (1) generating MCQs, (2) filtering out samples that can be answered by text-only (blind) LLMs, (3) performing human verification of negatives, and (4) balancing the distribution of question types and domains. The statistics of the dataset are summarized in Table 18. In more detail the steps we followed are:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 568, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 504, + 624 + ], + "type": "text", + "content": "MCQ Generation: To transform QAs into challenging MCQs for evaluation, instead of generating random incorrect answers, we prompt LLMs to produce hard negatives that are semantically close to the correct answer. We use the following prompt which was designed to generate distractors that differ from the correct answer by only a single detail. 
In effect this enables evaluation to assess fine-grained reasoning about object attributes and tool distinctions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 628, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 673 + ], + "type": "text", + "content": "Filtering Text-Only Answers: To ensure that video-based reasoning is required, we test whether a text-only LLM can answer the question correctly without seeing the video. If a question can be answered correctly from text alone, we remove or modify it to emphasize visual and temporal grounding." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "Human Verification of Negatives: Automatically generated negatives may sometimes be factually true despite being labeled as incorrect. To address this, we perform human verification, where annotators review distractors to confirm that they are both plausible yet definitively incorrect given the video context.MCQs with ambiguous distractors are removed." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "Balancing Question Types: Finally, after the above postprocessing and filtering is done, we rebalance the test set, to make sure that the question type and domain distributions are approximately uniform, by undersampling over-represented question types and domains." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 504, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 177 + ], + "type": "text", + "content": "Note on the evaluation metric. We report the multi-binary accuracy (MBAcc) [99] to evaluate on the FG-QA task. This accuracy is calculated by comparing the correct answer to each distractor individually. Specifically, for each question, we generate a series of binary questions, where the correct answer is compared with one distractor at a time. A prediction is considered correct only if the correct answer is consistently selected across all binary comparisons. We preferred this metric to vanilla MCQ accuracy as it greatly reduces the predictability of automatically-generated MCQs." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 189, + 208, + 199 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 189, + 208, + 199 + ], + "spans": [ + { + "bbox": [ + 110, + 189, + 208, + 199 + ], + "type": "text", + "content": "MCQ generation prompt" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 202, + 332, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 202, + 332, + 211 + ], + "spans": [ + { + "bbox": [ + 111, + 202, + 332, + 211 + ], + "type": "text", + "content": "Here is a question and answer pair about a video:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 213, + 171, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 213, + 171, + 221 + ], + "spans": [ + { + "bbox": [ + 111, + 213, + 171, + 221 + ], + "type": "text", + "content": "Q: [question]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 162, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 162, + 232 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 162, + 232 + ], + "type": "text", + "content": "A: [answer]" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 232, + 499, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 232, + 499, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 232, + 499, + 361 + ], + "type": "text", + "content": "You need to transform this into a high-quality multiple-choice question. To do this, first rephrase the given correct answer and then provide n distractor answers. The n incorrect answers should be reasonable and valid responses to the question, but should have a different meaning than the correct answer. You generate an incorrect answer from the correct one by changing a single detail, e.g. an object or verb/action that is relevant to what's being asked. 
Make the incorrect answers realistic, plausible and similar enough to the correct answer so that it is very difficult for someone to distinguish between them with prior knowledge alone. Finding the correct answer should also require visual information about the scene. The distractor answers should answer the question, but should be incorrect but in a non-obvious way. When changing a single detail to create the distractors, make sure that this detail is the main point of the question. For example, if the question is about the color of an object, then the distractor should change the color of the object and not the kind of object." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 361, + 499, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 361, + 499, + 381 + ], + "spans": [ + { + "bbox": [ + 111, + 361, + 499, + 381 + ], + "type": "text", + "content": "Here are some examples of good distractors (desired) and bad distractors (to be avoided):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 382, + 418, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 382, + 418, + 392 + ], + "spans": [ + { + "bbox": [ + 111, + 382, + 418, + 392 + ], + "type": "text", + "content": "Q: What is the person wearing on their hands while applying varnish?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 392, + 498, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 392, + 498, + 411 + ], + "spans": [ + { + "bbox": [ + 111, + 392, + 498, + 411 + ], + "type": "text", + "content": "A: The person is wearing white gloves on their hands while applying varnish with a brush." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 411, + 188, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 411, + 188, + 420 + ], + "spans": [ + { + "bbox": [ + 111, + 411, + 188, + 420 + ], + "type": "text", + "content": "Good distractors:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 421, + 496, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 421, + 496, + 440 + ], + "spans": [ + { + "bbox": [ + 111, + 421, + 496, + 440 + ], + "type": "text", + "content": "- The person is wearing black gloves on their hands while applying varnish with a brush. Bad distractors:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 441, + 496, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 441, + 496, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 441, + 496, + 460 + ], + "type": "text", + "content": "- The person is wearing black gloves on their hands while applying paint with a roller. .. More examples & formatting ..." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 483, + 224, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 224, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 224, + 496 + ], + "type": "text", + "content": "H PLM-STC Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 511, + 504, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 622 + ], + "type": "text", + "content": "We present PLM Spatio-Temporal Captions (PLM-STC), a novel dataset aimed at training and evaluating VLMs for spatial-temporal reasoning. We collected pairs of mask tablets for objects in videos, along with their corresponding detailed temporal descriptions. The annotations are collected on top of the SA-V [124] videos, which are diverse and high-quality. 
We excluded the test set videos from SA-V, to avoid any data cross contamination. Table 20 provides statistics about the dataset, such as number of total samples, training/val/test splits, object types, and time-segment duration. PLM-STC, is not only novel, but also larger and higher quality compared to existing datasets, see Table 19. In Fig. 5 (right), we show an example of our spatio-temporal captions, describing a little girl (highlighted in blue): (frame 0-81): A little girl moves back as beluga whale approaches her face. (frame 82-85): Out of frame. (frame 86-98): She tries to feed the whale." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "content": "We describe the overall annotation process in Appendix H.1, and how we build the three sub-tasks in Appendix H.2." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 666, + 217, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 217, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 217, + 677 + ], + "type": "text", + "content": "H.1 Annotation Process" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 689, + 504, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 721 + ], + "type": "text", + "content": "The annotation process is summarized in Figure 14. The annotation process involves three stages: Object Selection and Tracking, Temporal Segmentation and Captioning and Verification and Quality Control." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 148, + 70, + 463, + 186 + ], + "blocks": [ + { + "bbox": [ + 148, + 70, + 463, + 186 + ], + "lines": [ + { + "bbox": [ + 148, + 70, + 463, + 186 + ], + "spans": [ + { + "bbox": [ + 148, + 70, + 463, + 186 + ], + "type": "table", + "html": "
DatasetSpatial TypeYear#VideosRegionsTemp. Seg.Captions?
DAVIS16-RVOS [228]Segmentation20185050-No
DAVIS17-RVOS [229]Segmentation201890205-No
YouCook2-BB [83]BBox2018647-4.3KNo
A2D Sentence [230]Segmentation20183.7K4.8K-No
J-HMDB Sentence [231]Segmentation2018928928-No
ActivityNet Entities [232]BBox201914.3K1.5M52KNo
VidSTG [9]BBox20206.9K44.8K-No
Refer-Youtube-VOS [233]Segmentation20203.9K7.5K-No
HC-STVG [234]BBox202116K16K-No
VLN [123]Mouse Trace202350K43.1K43.1KYes
MeVis [235]Segmentation20232K8.8K-No
PLM-STCSegmentation202545.7K122.3K194.2KYes
", + "image_path": "dce981b90649f3333bcbeecafe451dd31d77240bd5b444c91b0d0b02ac2b5874.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 176, + 190, + 434, + 203 + ], + "lines": [ + { + "bbox": [ + 176, + 190, + 434, + 203 + ], + "spans": [ + { + "bbox": [ + 176, + 190, + 434, + 203 + ], + "type": "text", + "content": "Table 19: Spatio-Temporal-Captioning datasets comparison." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 106, + 220, + 504, + 264 + ], + "blocks": [ + { + "bbox": [ + 106, + 220, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 106, + 220, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 504, + 264 + ], + "type": "image", + "image_path": "9d17dc536750b2f00b965dc2e9b92faf6895b4a18767e1761d0d1e8226c4a309.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 213, + 269, + 395, + 281 + ], + "lines": [ + { + "bbox": [ + 213, + 269, + 395, + 281 + ], + "spans": [ + { + "bbox": [ + 213, + 269, + 395, + 281 + ], + "type": "text", + "content": "Figure 14: PLM-STC Annotation pipeline." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 309, + 270, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 270, + 322 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 270, + 322 + ], + "type": "text", + "content": "H.1.1 Object Selection and Tracking" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 331, + 506, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 462 + ], + "type": "text", + "content": "Annotators select interesting objects with significant motion changes in the video and use SAM 2 [124] to generate initial mask tablets, which they then refine to ensure high-quality spatial-temporal segmentation. We instructed the annotators by defining interesting regions in video footage as those with the presence of significant, dynamic actions performed by subjects, which can be human, animal, or object. These regions involve multiple major actions that evolve over time, rather than static or insignificant actions. We provided annotators with examples of interesting regions, such as one featuring a person making a sandwich, a dog chasing a cat, or a kite getting stuck in a tree. The goal for the annotator is to identify regions with high delta, where the subject performs a sequence of significant activities that change over time, such as a person entering a room, sitting down, and then drinking from a glass. By focusing on these dynamic and evolving actions, annotators can effectively select regions worthy of captioning. Finally, annotators are provided with several examples of good and bad annotations." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 483, + 312, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 312, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 312, + 495 + ], + "type": "text", + "content": "H.1.2 Temporal Segmentation and Captioning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 505, + 506, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 506, + 593 + ], + "type": "text", + "content": "Based on the selected mask tablets, another set of annotators provides time segments for each action and fills in the caption within each time segment. The annotators are instructed to focus on capturing major actions, avoiding minor details or unnecessary movements. When writing captions for each segment, they must ensure clarity in describing the subject's movements and directionality. Additionally, the annotators are advised to avoid making assumptions about the subject's actions or adding details not clearly visible, sticking only to what is directly observable in the frame. As in the previous task, the annotators are provided with several examples of good and bad annotations to guide their work." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 612, + 279, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 612, + 279, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 612, + 279, + 624 + ], + "type": "text", + "content": "H.1.3 Verification and Quality Control" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "type": "text", + "content": "A final set of annotators manually verifies the tablets and time-segment captions to ensure accuracy and consistency. 
For mask refinement, we re-run the same pipeline as §H.1.1, while not letting the annotators choose the interesting object, but only refine the quality of the mask. For captioning refinement, the annotators are tasked with three objectives: 1) Redundancy: eliminate any repeating or redundant information to ensure the caption is concise; 2) Accuracy: verify that every word in the caption accurately describes a fact present in the video, correcting or removing any incorrect information; and 3) Actions: add missing major action information to the caption while preserving existing atomic actions, ensuring the caption effectively conveys the key events in the video." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 176, + 70, + 432, + 241 + ], + "blocks": [ + { + "bbox": [ + 176, + 70, + 432, + 241 + ], + "lines": [ + { + "bbox": [ + 176, + 70, + 432, + 241 + ], + "spans": [ + { + "bbox": [ + 176, + 70, + 432, + 241 + ], + "type": "table", + "html": "
AllTrainValTest
Dataset stats
Number of Videos45.2K42.0K8042.3K
Spatio Temporal Caption127.8K---
Temporal Caption198.7K---
Tube's categories
Person104.5K99.6K8612.4K
Animal16.8K13.2K5501.7K
Object/things6.4K4.4K4361.2K
Temporal captions per Tube
1 caption per tube78.9K73.9K8422.4K
2 caption per tube30.9K27.8K5661.7K
3 or more Caption per tube16.38K14.15K4211.2K
Tasks stats
Region Detailed Captioning (RDCap)122.3K117.2K2.5K2.6K
Region Captioning (RCap)194.2K179.5K4.6K10.1K
Region Temporal Localization (RTLoc)192.0K179.5K4.6K7.9K
", + "image_path": "4d825ae476a074dbec45eef2c486a29543783462afb9b5ea512cff79f913689b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": "Table 20: PLM-STC dataset statistics. Note the for RTLoc, we filter the test set to include only the captions that are unambiguously localized, i.e., they map to a single time window in the video. As a result, the test set size is reduced to 7,910 instances compared to RCap." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 307, + 232, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 232, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 232, + 319 + ], + "type": "text", + "content": "H.2 PLM-STC Benchmark" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 328, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 504, + 384 + ], + "type": "text", + "content": "We utilize the collected data to train and evaluate the PLM on three challenging tasks that are essential for video perception. Firstly, we created a balanced validation and test split by the combination of tube categories and number of caption per tube while making sure no video overlaps with the training set. This is done to make sure we evaluate all the categories presents in the dataset equally. 
Then, we process the data for each task:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 388, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 433 + ], + "type": "text", + "content": "- Dense Video Region Captioning (RDCap). This comprehensive task combines both \"what\" and \"when\" aspects. The model takes the video and the tubelets as input and outputs the full time-segment captions. We also assign an out of frame caption to temporal segments for which the subject does not appear in the video to ensure dense temporal coverage of events across the video duration." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 437, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 482 + ], + "type": "text", + "content": "Video Region Captioning (RCap). This task involves describing \"what\" activities are performed within a specific time frame by the objects in the tubelets. The model receives the video, the tubelets, and the temporal region as input and outputs the corresponding captions. We filter out events that refer to the subject when it is out-of-frame to avoid evaluating trivial captions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 487, + 506, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 564 + ], + "type": "text", + "content": "Region Temporal Localization (RTLoc). This task requires the model to localize \"when\" specific events occur in relation to a given tubelet. The input includes the video, the tubelet, and the caption, while the output is the start and end frames indicating when the captioned event occurs. 
Like RCap, we filter out out-of-frame events, as well as ambiguous events that may be localized to multiple time segments. For example, if the subject opens the door twice, the event text is guaranteed to be unique (e.g., referring to the first and second time they opened the door) or dropped entirely if ambiguous (e.g., if the text only mentions the action)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 568, + 506, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 506, + 602 + ], + "type": "text", + "content": "These tasks are designed to both improve and evaluate the model's capabilities, with the same input-output format applied during both training and evaluation. Figure 6 illustrate an examples of the task, including the prompt used to train and evaluate the PLM." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 620, + 226, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 226, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 226, + 632 + ], + "type": "text", + "content": "I Smart Glasses Data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 646, + 260, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 260, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 260, + 657 + ], + "type": "text", + "content": "I.1 Data collection and annotation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "content": "We collected the source videos for PLM-SGQA using commercial smart glasses, which enable participants to capture egocentric videos in a hands-free manner. 
Participants are presented with 14 categories of popular scenarios, such as shopping, cooking, and walking in a neighborhood, and are instructed to ask questions about their surroundings as if interacting with a multi-modal assistant that shares their visual perspective. Specifically, participants are asked to ask questions spontaneously," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "without delay, about the things they see and experience, and to focus on visual queries rather than dynamic information that may change regularly. After recording the videos, participants annotate the segments by marking the start and end points of the video relevant to each question, as well as providing the ground-truth answer." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 129, + 211, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 211, + 141 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 211, + 141 + ], + "type": "text", + "content": "I.2 SGQA Benchmark" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 149, + 506, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 149, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 149, + 506, + 248 + ], + "type": "text", + "content": "To create the SGQA component of PLM-VideoBench we first filtered the Q&As using an LLM to obtain a shortlist of questions that focus on human activity and also are perception-based rather than based on general knowledge. This means that SGQA focus on questions that require good visual understanding of the scene to be accurately answered. This process yields an evaluation set consisting of 655 Q&As. For the resulting Q&As, we then trimmed the original videos to obtain clips within the temporal boundary that the human wearer/annotator specified. As the annotated segments end at the point where the smart-glass wearer asks the question, it is important for all evaluations to specify that the question refers to the end of the video clip - e.g. see the prompt we used for PLM and baselines evaluation in 10. We summarize the statistics of the SGQA test set in Figures 15 and 16." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 127, + 275, + 254, + 365 + ], + "blocks": [ + { + "bbox": [ + 127, + 275, + 254, + 365 + ], + "lines": [ + { + "bbox": [ + 127, + 275, + 254, + 365 + ], + "spans": [ + { + "bbox": [ + 127, + 275, + 254, + 365 + ], + "type": "table", + "html": "
Sources stats
Total Videos663
Average Duration (sec.)29.4
Annotations stats
Number of QA Pairs665
Number Domains14
Question Length (avg/max)9.0 / 52
Answer Length (avg/max)21.6 / 40
Annotation TypeHuman
Open-DomainYes
", + "image_path": "9a4eb11215c651515e62e6429fc7934bc585298496e5b030e5de5e0d8c25b3b3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 126, + 370, + 256, + 392 + ], + "lines": [ + { + "bbox": [ + 126, + 370, + 256, + 392 + ], + "spans": [ + { + "bbox": [ + 126, + 370, + 256, + 392 + ], + "type": "text", + "content": "Figure 15: Statistics of the PLMSGQA test data." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 259, + 264, + 481, + 377 + ], + "blocks": [ + { + "bbox": [ + 259, + 264, + 481, + 377 + ], + "lines": [ + { + "bbox": [ + 259, + 264, + 481, + 377 + ], + "spans": [ + { + "bbox": [ + 259, + 264, + 481, + 377 + ], + "type": "image", + "image_path": "deb55031b0dcd69c607f38cdac47b1dcfd24e19c9457a8c15e649704593f1dbe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 254, + 387, + 485, + 410 + ], + "lines": [ + { + "bbox": [ + 254, + 387, + 485, + 410 + ], + "spans": [ + { + "bbox": [ + 254, + 387, + 485, + 410 + ], + "type": "text", + "content": "Figure 16: Domain distribution of video-clips in PLMSGQA." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 412, + 241, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 412, + 241, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 412, + 241, + 426 + ], + "type": "text", + "content": "J Synthetic Data Engine" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 437, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 471 + ], + "type": "text", + "content": "Our data engine targets base capabilities of VLMs: image captioning, visual question answering, OCR, chart/diagram understanding, and video understanding. 
We developed different pipelines for images and videos, and includes different levels of metadata to generate captions and QAs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "type": "text", + "content": "Image Captions: We caption high-quality images using Llama 3.1V 90B. An example is shown in Figure 17. We use this pipeline to caption SA1B [105], Object365 [135], and OpenImages [136]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 502, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 504, + 536 + ], + "type": "text", + "content": "OCR QAs: We leverage pre-extracted OCR and use it as input for a LLM (i.e., Llama 3.3 70B) to generate a set of five question-answer pairs. An example is shown in Figure 18. We use this pipeline to generate QAs for PDFAcc [132], and UCSF [133]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "type": "text", + "content": "Image Captioning plus QAs: In cases for which OCR does not provide enough information to create questions (e.g., scientific figures), we further caption the image using Llama 3.1V 90B. Then we pass the caption with auxiliary metadata (e.g., OCR) to a LLM (i.e., Llama 3.3 70B) to generate question-answers pairs. An example is shown in Figure 19). We use this pipeline to generate captions and QAs for ArxivQA [59], DocVQA [53], InfoVQA [56] and Ai2d [55]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "text", + "content": "Video Captioning plus QAs: An image captioner is run on key-frames of the video, as well as a video captioner on the overall video at 1 fps. The result captions are passed to a LLM (i.e., Llama 3.3 70B, or Llama 3 405B) with additional metadata (e.g., video title etc.), so to generate a detailed caption and a multiple-chosen question answers pair. An example is shown in Figure 20). We use this pipeline to generate captions and QAs for YT-1B [236], Ego4d [115] " + }, + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "text", + "content": ", DiDeMo [140], Charades [139], and Kinetics710 [121] " + }, + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 104, + 601, + 506, + 667 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 689, + 361, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 689, + 361, + 700 + ], + "spans": [ + { + "bbox": [ + 118, + 689, + 361, + 700 + ], + "type": "text", + "content": "3For this dataset we used Llama3 405B, rather than Llama 3.3 70B" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "type": "text", + "content": "4DiDeMo, Charades, Kinetics710 used a simpler pipeline where only frame captions were used, and a smaller scale LLM (i.e., Llama 3.1 8B)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 346, + 83, + 420, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 83, + 420, + 94 + ], + "spans": [ + { + "bbox": [ + 346, + 83, + 420, + 94 + ], + "type": "text", + "content": "Detailed Caption" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 107, + 115, + 247, + 221 + ], + "blocks": [ + { + "bbox": [ + 107, + 115, + 247, + 221 + ], + "lines": [ + { + "bbox": [ + 107, + 115, + 247, + 221 + ], + "spans": [ + { + "bbox": [ + 107, + 115, + 247, + 221 + ], + "type": "image", + "image_path": "1268ac9fb233bb4929bb4dab04092dfd0cbace7e85b8ed3fbef0c19086670027.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 255, + 98, + 517, + 237 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 255, + 98, + 517, + 237 + ], + "spans": [ + { + "bbox": [ + 255, + 98, + 517, + 237 + ], + "type": "text", + "content": "The image depicts a yellow building with a red door and a sign that reads \"YOLCU SALONU\" in black letters. The building has a gable roof and a small window on the left side. There are two trees in front of the building, one on each side of the entrance. The ground is wet, and there is snow on the trees and the ground. In the background, there is a train station with a train on the tracks. The sky is gray and overcast. The image appears to be a photograph taken on a cold winter day. The presence of snow and the gray sky suggest that it is a winter scene. The building and the train station in the background suggest that it is a public place, possibly a train station or a bus stop. The sign on the building reads \"YOLCU SALONU,\" which is Turkish for \"Passenger Lounge.\" This suggests that the building is a waiting area for passengers. Overall, the image depicts a quiet and peaceful winter scene, with a focus on the building and the surrounding environment." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 194, + 243, + 429, + 270 + ], + "blocks": [ + { + "bbox": [ + 194, + 243, + 429, + 270 + ], + "lines": [ + { + "bbox": [ + 194, + 243, + 429, + 270 + ], + "spans": [ + { + "bbox": [ + 194, + 243, + 429, + 270 + ], + "type": "image", + "image_path": "31a57553c561071222c0d6a49171e30a665fd047b52d892d96493b8bb064d276.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "lines": [ + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "type": "text", + "content": "Figure 17: Detailed caption example, and the corresponding pipeline. The captioner (i.e., Llama 3V 90B) is prompted to generate the caption for the provided image." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 401, + 330, + 425, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 401, + 330, + 425, + 340 + ], + "spans": [ + { + "bbox": [ + 401, + 330, + 425, + 340 + ], + "type": "text", + "content": "OCR" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 106, + 345, + 517, + 475 + ], + "blocks": [ + { + "bbox": [ + 106, + 345, + 517, + 475 + ], + "lines": [ + { + "bbox": [ + 106, + 345, + 517, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 517, + 475 + ], + "type": "image", + "image_path": "4fe6fb4b3ec1c97204688a86055c7da0d2130a401a1b67f4a929d0c3a451e99b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 271, + 479, + 351, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 479, + 351, + 489 + ], + "spans": [ + { + "bbox": [ + 271, + 479, + 351, + 489 + ], + "type": "text", + "content": "Question Answers" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 494, + 510, + 624 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 494, + 266, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 494, + 266, + 504 + ], + "spans": [ + { + "bbox": [ + 111, + 494, + 266, + 504 + ], + "type": "text", + "content": "Q: How many teams qualify for the finals?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 112, + 504, + 510, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 504, + 510, + 522 + ], + "spans": [ + { + "bbox": [ + 112, + 504, + 510, + 522 + ], + "type": "text", + "content": "A: The top 3 teams from each section, plus the next two highest teams on points, qualify for the finals, making a total of 5 teams." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 112, + 524, + 299, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 524, + 299, + 534 + ], + "spans": [ + { + "bbox": [ + 112, + 524, + 299, + 534 + ], + "type": "text", + "content": "Q: How many points has the Buderim team earned?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 112, + 534, + 274, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 534, + 274, + 544 + ], + "spans": [ + { + "bbox": [ + 112, + 534, + 274, + 544 + ], + "type": "text", + "content": "A: The Buderim team has earned 150 points." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 112, + 544, + 256, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 544, + 256, + 553 + ], + "spans": [ + { + "bbox": [ + 112, + 544, + 256, + 553 + ], + "type": "text", + "content": "Q: Which team is currently in the lead?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 112, + 554, + 391, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 554, + 391, + 563 + ], + "spans": [ + { + "bbox": [ + 112, + 554, + 391, + 563 + ], + "type": "text", + "content": "A: The Albert team is currently in the lead with 36 points and a margin of 39." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 112, + 564, + 313, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 564, + 313, + 573 + ], + "spans": [ + { + "bbox": [ + 112, + 564, + 313, + 573 + ], + "type": "text", + "content": "Q: What is the current margin of the Mooloolaba team?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 112, + 574, + 298, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 574, + 298, + 583 + ], + "spans": [ + { + "bbox": [ + 112, + 574, + 298, + 583 + ], + "type": "text", + "content": "A: The Mooloolaba team has a current margin of 1." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 583, + 293, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 583, + 293, + 593 + ], + "spans": [ + { + "bbox": [ + 112, + 583, + 293, + 593 + ], + "type": "text", + "content": "Q: What is the overall summary of the document?" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 594, + 510, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 594, + 510, + 624 + ], + "spans": [ + { + "bbox": [ + 111, + 594, + 510, + 624 + ], + "type": "text", + "content": "A: The document appears to be a standings table for the Ladies Division Three of a bowling or lawn bowls competition, showing the current standings of various teams, including their win-loss records, points, and margins." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 293, + 635, + 329, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 635, + 329, + 646 + ], + "spans": [ + { + "bbox": [ + 293, + 635, + 329, + 646 + ], + "type": "text", + "content": "Pipeline" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 192, + 651, + 430, + 679 + ], + "blocks": [ + { + "bbox": [ + 192, + 651, + 430, + 679 + ], + "lines": [ + { + "bbox": [ + 192, + 651, + 430, + 679 + ], + "spans": [ + { + "bbox": [ + 192, + 651, + 430, + 679 + ], + "type": "image", + "image_path": "0cee851dbcab299eafe247b722f18feb598d44c3a07241c1e4767a2c4fff2798.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 687, + 504, + 711 + ], + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 711 + ], + "type": "text", + "content": "Figure 18: Visual Question Answering pairs and the corresponding pipeline. The OCR text is extracted from the image, and passed to the LLM (i.e., Llama 3.3 70B) to generate QA pairs." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 401, + 167, + 425, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 401, + 167, + 425, + 177 + ], + "spans": [ + { + "bbox": [ + 401, + 167, + 425, + 177 + ], + "type": "text", + "content": "OCR" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 143, + 182, + 267, + 287 + ], + "blocks": [ + { + "bbox": [ + 143, + 182, + 267, + 287 + ], + "lines": [ + { + "bbox": [ + 143, + 182, + 267, + 287 + ], + "spans": [ + { + "bbox": [ + 143, + 182, + 267, + 287 + ], + "type": "image", + "image_path": "8e79177a0ff95b0e8f386b3c911fa139e72510a8be6c7f623a43f333b8042601.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 315, + 223, + 516, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 223, + 516, + 245 + ], + "spans": [ + { + "bbox": [ + 315, + 223, + 516, + 245 + ], + "type": "text", + "content": "SB 1011 10 Fraction of objects 0.2 0.1 0.0 0.01 0.1 1.0 10.0 erg s1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 273, + 289, + 348, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 289, + 348, + 300 + ], + "spans": [ + { + "bbox": [ + 273, + 289, + 348, + 300 + ], + "type": "text", + "content": "Detailed Caption" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "spans": [ 
+ { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": "The image depicts a histogram of the distribution of objects, with the x-axis labeled \"LSB[1046ergs-1]\" and the y-axis labeled \"Fraction of objects.\" The x-axis ranges from 0.01 to 10.0, while the y-axis ranges from 0.0 to 0.2. The histogram is divided into bins of varying widths, with the first bin spanning from 0.01 to 0.1, the second bin spanning from 0.1 to 1.0, and so on. Each bin contains a bar representing the fraction of objects within that range. The bars are shaded in gray, with some bins having multiple bars. A key feature of the histogram is the presence of a peak in the middle bin, which corresponds to an " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " value of around 1.0. This suggests that the majority of objects have an " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " value close to this value. The histogram also shows a tail extending towards higher " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " values, indicating that some objects have significantly higher " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " values than the majority. 
Overall, the histogram provides a visual representation of the distribution of " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " values among the objects being studied. It allows for easy identification of patterns and trends in the data, such as the peak in the middle bin and the tail towards higher " + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 110, + 305, + 512, + 416 + ], + "type": "text", + "content": " values." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 223, + 419, + 399, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 419, + 399, + 431 + ], + "spans": [ + { + "bbox": [ + 223, + 419, + 399, + 431 + ], + "type": "text", + "content": "Multi-Choice Question Answer (MCQA)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 434, + 420, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 434, + 420, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 434, + 420, + 445 + ], + "type": "text", + "content": "What is the approximate " + }, + { + "bbox": [ + 111, + 434, + 420, + 445 + ], + "type": "inline_equation", + "content": "L_{SB}" + }, + { + "bbox": [ + 111, + 434, + 420, + 445 + ], + "type": "text", + "content": " value at which the majority of objects have their peak?" 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 445, + 174, + 504 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 112, + 445, + 144, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 445, + 144, + 454 + ], + "spans": [ + { + "bbox": [ + 112, + 445, + 144, + 454 + ], + "type": "text", + "content": "Options:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 456, + 139, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 456, + 139, + 464 + ], + "spans": [ + { + "bbox": [ + 112, + 456, + 139, + 464 + ], + "type": "text", + "content": "(A) 0.1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 112, + 465, + 139, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 465, + 139, + 474 + ], + "spans": [ + { + "bbox": [ + 112, + 465, + 139, + 474 + ], + "type": "text", + "content": "(B) 1.0" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 112, + 475, + 139, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 475, + 139, + 483 + ], + "spans": [ + { + "bbox": [ + 112, + 475, + 139, + 483 + ], + "type": "text", + "content": "(C) 5.0" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 112, + 484, + 144, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 484, + 144, + 494 + ], + "spans": [ + { + "bbox": [ + 112, + 484, + 144, + 494 + ], + "type": "text", + "content": "(D) 10.0" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 112, + 495, + 174, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 495, + 174, + 504 + ], + "spans": [ + { + "bbox": [ + 112, + 495, + 174, + 504 + ], + "type": "text", + "content": "Answer: (B) 1.0." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 292, + 508, + 329, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 508, + 329, + 519 + ], + "spans": [ + { + "bbox": [ + 292, + 508, + 329, + 519 + ], + "type": "text", + "content": "Pipeline" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 132, + 523, + 491, + 583 + ], + "blocks": [ + { + "bbox": [ + 132, + 523, + 491, + 583 + ], + "lines": [ + { + "bbox": [ + 132, + 523, + 491, + 583 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 491, + 583 + ], + "type": "image", + "image_path": "e412ee6336c6f3f3ecb37e27ff8cad8c9ab87a00109169edfb9921efea2bb8d3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 592, + 506, + 625 + ], + "lines": [ + { + "bbox": [ + 104, + 592, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 592, + 506, + 625 + ], + "type": "text", + "content": "Figure 19: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. The OCR text is extracted from the image, and the caption is generated by the captioner (i.e., Llama 3V 90B), which are all passed to the LLM (i.e., Llama 3.3 70B) to generate MCQAs." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 92, + 511, + 149 + ], + "blocks": [ + { + "bbox": [ + 111, + 92, + 511, + 149 + ], + "lines": [ + { + "bbox": [ + 111, + 92, + 511, + 149 + ], + "spans": [ + { + "bbox": [ + 111, + 92, + 511, + 149 + ], + "type": "image", + "image_path": "7a2b9a755d06734efc06641a316b5956bffb59da9244e42a1b4b231e624b84af.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 289, + 152, + 333, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 152, + 333, + 163 + ], + "spans": [ + { + "bbox": [ + 289, + 152, + 333, + 163 + ], + "type": "text", + "content": "Metadata" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 167, + 315, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 167, + 315, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 167, + 315, + 178 + ], + "type": "text", + "content": "Title: Lions VS Colts Highlights 2017 Preseason Game" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 178, + 347, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 347, + 189 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 347, + 189 + ], + "type": "text", + "content": "Description: Comment suggestions for future videos and Enjoy!" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 277, + 191, + 345, + 203 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 191, + 345, + 203 + ], + "spans": [ + { + "bbox": [ + 277, + 191, + 345, + 203 + ], + "type": "text", + "content": "Frame Caption" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 206, + 511, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 206, + 511, + 256 + ], + "spans": [ + { + "bbox": [ + 111, + 206, + 511, + 256 + ], + "type": "text", + "content": "Frame 435: The image shows a man with dreadlocks standing in front of a crowd of people in a stadium. He is wearing a white t-shirt and is surrounded by a group of people standing on the ground. On the left side of the image, there is a table fan, bottles, and other objects placed on a table. In the background, there are people sitting on chairs, stairs, railings, boards with text, lights, and the sky. The text on the boards reads \"Indianapolis Colts vs San Francisco 49ers" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 256, + 510, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 256, + 510, + 286 + ], + "spans": [ + { + "bbox": [ + 111, + 256, + 510, + 286 + ], + "type": "text", + "content": "Frame 585: The image shows a football game being played on a TV screen, with a group of people on the ground and a few people standing in the background. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 286, + 510, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 286, + 510, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 286, + 510, + 316 + ], + "type": "text", + "content": "Frame 765: The image shows a group of people playing a game of football on a green field, with white lines marking the boundaries of the field. At the bottom of the image, there is text and numbers indicating that the game is between the Indianapolis Colts and the Detroit Lions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 316, + 510, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 316, + 510, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 316, + 510, + 346 + ], + "type": "text", + "content": "Frame 945: The image shows a football game being played on a TV screen, with people wearing helmets and playing on the ground. At the bottom of the image, there is text and numbers indicating that the game is between the Detroit Lions and the Indianapolis Colts." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 279, + 350, + 343, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 350, + 343, + 361 + ], + "spans": [ + { + "bbox": [ + 279, + 350, + 343, + 361 + ], + "type": "text", + "content": "Video Caption" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 365, + 511, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 511, + 384 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 511, + 384 + ], + "type": "text", + "content": "Football players wearing helmets, Detroit Lions vs Indianapolis Colts, player running with ball, falls down, touchdown scored." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 273, + 387, + 348, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 387, + 348, + 399 + ], + "spans": [ + { + "bbox": [ + 273, + 387, + 348, + 399 + ], + "type": "text", + "content": "Detailed Caption" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 402, + 511, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 402, + 511, + 472 + ], + "spans": [ + { + "bbox": [ + 111, + 402, + 511, + 472 + ], + "type": "text", + "content": "A football player is running with the ball and then falls down, the game is between the Detroit Lions and the Indianapolis Colts, with players wearing blue and white uniforms and helmets, and people sitting in the stadium, a watermark on the image shows the teams playing, one player is laying on the ground next to another player wearing a white and blue jersey and a white helmet, a horseshoe-shaped object is on the grass, text on the image indicates a touchdown, players are wearing helmets and playing on the ground, with watermarks in the bottom left corner of the images, the game involves the Detroit Lions and the Indianapolis Colts, and also shows the Colts playing against the New England Patriots and the Lions playing against the Dallas Cowboys." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 223, + 475, + 399, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 475, + 399, + 487 + ], + "spans": [ + { + "bbox": [ + 223, + 475, + 399, + 487 + ], + "type": "text", + "content": "Multi-Choice Question Answer (MCQA)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 491, + 299, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 491, + 299, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 491, + 299, + 502 + ], + "type": "text", + "content": "What is the outcome of the football player's action?" 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 112, + 502, + 144, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 502, + 144, + 511 + ], + "spans": [ + { + "bbox": [ + 112, + 502, + 144, + 511 + ], + "type": "text", + "content": "Options:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 512, + 271, + 571 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 112, + 512, + 233, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 512, + 233, + 521 + ], + "spans": [ + { + "bbox": [ + 112, + 512, + 233, + 521 + ], + "type": "text", + "content": "(A) the player runs out of bounds" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 112, + 521, + 236, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 521, + 236, + 531 + ], + "spans": [ + { + "bbox": [ + 112, + 521, + 236, + 531 + ], + "type": "text", + "content": "(B) the player scores a touchdown" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 112, + 531, + 214, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 531, + 214, + 540 + ], + "spans": [ + { + "bbox": [ + 112, + 531, + 214, + 540 + ], + "type": "text", + "content": "(C) the player drops the ball" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 112, + 541, + 203, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 541, + 203, + 551 + ], + "spans": [ + { + "bbox": [ + 112, + 541, + 203, + 551 + ], + "type": "text", + "content": "(D) the player falls down" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 112, + 551, + 207, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 551, + 207, + 561 + ], + "spans": [ + { + "bbox": [ + 112, + 551, + 207, + 561 + ], + "type": "text", + "content": "(E) the player gets injured" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 112, + 561, + 271, + 571 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 112, + 561, + 271, + 571 + ], + "spans": [ + { + "bbox": [ + 112, + 561, + 271, + 571 + ], + "type": "text", + "content": "Answer: (B) the player scores a touchdown." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 292, + 574, + 329, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 574, + 329, + 586 + ], + "spans": [ + { + "bbox": [ + 292, + 574, + 329, + 586 + ], + "type": "text", + "content": "Pipeline" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 133, + 590, + 491, + 648 + ], + "blocks": [ + { + "bbox": [ + 133, + 590, + 491, + 648 + ], + "lines": [ + { + "bbox": [ + 133, + 590, + 491, + 648 + ], + "spans": [ + { + "bbox": [ + 133, + 590, + 491, + 648 + ], + "type": "image", + "image_path": "6ae83b9a6dc806a8b301beb09601202a299d8e4d55734c7e55952388176da2f3.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 700 + ], + "type": "text", + "content": "Figure 20: Detailed Captions and Multi-Choice Question Answers (MCQAs) and the corresponding pipeline. An LLM (i.e., Llama 3.3 70B) with a video caption, frame captions at 1 fps, and additional metadata (e.g., video title etc.), generates a detailed caption and multiple-choices question answers pair." 
+ } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 227, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 227, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 227, + 85 + ], + "type": "text", + "content": "K Qualitative Results" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 120, + 129, + 233, + 300 + ], + "blocks": [ + { + "bbox": [ + 120, + 129, + 233, + 300 + ], + "lines": [ + { + "bbox": [ + 120, + 129, + 233, + 300 + ], + "spans": [ + { + "bbox": [ + 120, + 129, + 233, + 300 + ], + "type": "image", + "image_path": "1530b58b8d9bea8c5654bbea62fd379cc6feae8a7b23471dfe80e80aea475eb0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 331, + 379, + 344 + ], + "lines": [ + { + "bbox": [ + 230, + 331, + 379, + 344 + ], + "spans": [ + { + "bbox": [ + 230, + 331, + 379, + 344 + ], + "type": "text", + "content": "Figure 21: PLM-8B OCR examples." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 255, + 103, + 454, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 103, + 454, + 114 + ], + "spans": [ + { + "bbox": [ + 255, + 103, + 454, + 114 + ], + "type": "text", + "content": "Question: Provide a detailed description of the image." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 255, + 114, + 517, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 114, + 517, + 154 + ], + "spans": [ + { + "bbox": [ + 255, + 114, + 517, + 154 + ], + "type": "text", + "content": "Answer: The image is a pink postage stamp featuring a map of the Grenadines of St. Vincent, with a red border and black text. The map shows various locations on the island, including hotels, bays, and other points of interest." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 256, + 154, + 315, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 154, + 315, + 164 + ], + "spans": [ + { + "bbox": [ + 256, + 154, + 315, + 164 + ], + "type": "text", + "content": "*Key Features:*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 255, + 173, + 516, + 254 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 255, + 173, + 516, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 173, + 516, + 193 + ], + "spans": [ + { + "bbox": [ + 255, + 173, + 516, + 193 + ], + "type": "text", + "content": "* *Map:* A pink map of the Grenadines of St. Vincent, with a red border and black text." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 255, + 194, + 516, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 194, + 516, + 213 + ], + "spans": [ + { + "bbox": [ + 255, + 194, + 516, + 213 + ], + "type": "text", + "content": "* *Locations:* Various locations on the island are marked, including hotels, bays, and other points of interest." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 255, + 213, + 490, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 213, + 490, + 224 + ], + "spans": [ + { + "bbox": [ + 255, + 213, + 490, + 224 + ], + "type": "text", + "content": "* *Scale:* A scale in miles is provided at the bottom of the map." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 255, + 224, + 516, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 224, + 516, + 243 + ], + "spans": [ + { + "bbox": [ + 255, + 224, + 516, + 243 + ], + "type": "text", + "content": "* *Denomination:* The denomination \"30c\" is printed in the bottom-right corner of the stamp." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 255, + 243, + 476, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 243, + 476, + 254 + ], + "spans": [ + { + "bbox": [ + 255, + 243, + 476, + 254 + ], + "type": "text", + "content": "* *Border:* The stamp has a red border with a serrated edge." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 255, + 262, + 338, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 262, + 338, + 274 + ], + "spans": [ + { + "bbox": [ + 255, + 262, + 338, + 274 + ], + "type": "text", + "content": "*Overall Impression:*" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 255, + 283, + 517, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 283, + 517, + 323 + ], + "spans": [ + { + "bbox": [ + 255, + 283, + 517, + 323 + ], + "type": "text", + "content": "The image appears to be a vintage postage stamp from the Grenadines of St. Vincent, featuring a map of the island and various locations of interest. The stamp has a classic design and is likely from the mid-20th century." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 200, + 361, + 299, + 432 + ], + "blocks": [ + { + "bbox": [ + 200, + 361, + 299, + 432 + ], + "lines": [ + { + "bbox": [ + 200, + 361, + 299, + 432 + ], + "spans": [ + { + "bbox": [ + 200, + 361, + 299, + 432 + ], + "type": "image", + "image_path": "ed04d39d909fe72eeb12444135a8e509aba0d6b6dc41389801c5e33f2f769e55.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 439, + 402, + 452 + ], + "lines": [ + { + "bbox": [ + 208, + 439, + 402, + 452 + ], + "spans": [ + { + "bbox": [ + 208, + 439, + 402, + 452 + ], + "type": "text", + "content": "Figure 22: PLM-8B OCR in the wild examples." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 372, + 411, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 372, + 411, + 402 + ], + "spans": [ + { + "bbox": [ + 309, + 372, + 411, + 402 + ], + "type": "text", + "content": "Question: What does the word say together from the bikers?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 403, + 412, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 403, + 412, + 423 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 412, + 423 + ], + "type": "text", + "content": "Answer: The word says Indianapolis." 
+ } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 223, + 472, + 276, + 565 + ], + "blocks": [ + { + "bbox": [ + 223, + 472, + 276, + 565 + ], + "lines": [ + { + "bbox": [ + 223, + 472, + 276, + 565 + ], + "spans": [ + { + "bbox": [ + 223, + 472, + 276, + 565 + ], + "type": "image", + "image_path": "00a522bfdf47031dcbd13b64c0084f5b20a14508fbc9c3cea078addb8671c68b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 572, + 402, + 585 + ], + "lines": [ + { + "bbox": [ + 208, + 572, + 402, + 585 + ], + "spans": [ + { + "bbox": [ + 208, + 572, + 402, + 585 + ], + "type": "text", + "content": "Figure 23: PLM-8B OCR in the wild examples." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 494, + 412, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 494, + 412, + 514 + ], + "spans": [ + { + "bbox": [ + 309, + 494, + 412, + 514 + ], + "type": "text", + "content": "Question: How tall is the. \nkitty character in the image?" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 514, + 412, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 514, + 412, + 544 + ], + "spans": [ + { + "bbox": [ + 309, + 514, + 412, + 544 + ], + "type": "text", + "content": "Answer: The kitty character is approximately " + }, + { + "bbox": [ + 309, + 514, + 412, + 544 + ], + "type": "inline_equation", + "content": "100\\mathrm{cm}" + }, + { + "bbox": [ + 309, + 514, + 412, + 544 + ], + "type": "text", + "content": " tall." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 209, + 142, + 291, + 267 + ], + "blocks": [ + { + "bbox": [ + 209, + 142, + 291, + 267 + ], + "lines": [ + { + "bbox": [ + 209, + 142, + 291, + 267 + ], + "spans": [ + { + "bbox": [ + 209, + 142, + 291, + 267 + ], + "type": "image", + "image_path": "0b422e57894781ed7c26024a43c39c17fb9552b27d825a07a257853b593a0c99.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 276, + 384, + 289 + ], + "lines": [ + { + "bbox": [ + 225, + 276, + 384, + 289 + ], + "spans": [ + { + "bbox": [ + 225, + 276, + 384, + 289 + ], + "type": "text", + "content": "Figure 24: PLM-8B Natural image QA" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 310, + 140, + 411, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 140, + 411, + 160 + ], + "spans": [ + { + "bbox": [ + 310, + 140, + 411, + 160 + ], + "type": "text", + "content": "Question: What is inside the oven?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 310, + 161, + 411, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 161, + 411, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 161, + 411, + 180 + ], + "type": "text", + "content": "Answer: There is a baking sheet inside the oven." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 310, + 181, + 413, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 181, + 413, + 200 + ], + "spans": [ + { + "bbox": [ + 310, + 181, + 413, + 200 + ], + "type": "text", + "content": "Question: What temperature is the oven set to?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 310, + 201, + 411, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 201, + 411, + 220 + ], + "spans": [ + { + "bbox": [ + 310, + 201, + 411, + 220 + ], + "type": "text", + "content": "Answer: The oven is set to 200 degrees." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 310, + 220, + 411, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 220, + 411, + 240 + ], + "spans": [ + { + "bbox": [ + 310, + 220, + 411, + 240 + ], + "type": "text", + "content": "Question: What unit is the oven's temperature set to?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 241, + 413, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 241, + 413, + 260 + ], + "spans": [ + { + "bbox": [ + 310, + 241, + 413, + 260 + ], + "type": "text", + "content": "Answer: The oven's temperature is set to Celsius." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 186, + 434, + 313, + 632 + ], + "blocks": [ + { + "bbox": [ + 186, + 434, + 313, + 632 + ], + "lines": [ + { + "bbox": [ + 186, + 434, + 313, + 632 + ], + "spans": [ + { + "bbox": [ + 186, + 434, + 313, + 632 + ], + "type": "image", + "image_path": "507366e34ef9bdd8494b0097e739e8fea6443778365fe28bca28c076dd89537e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 639, + 371, + 652 + ], + "lines": [ + { + "bbox": [ + 238, + 639, + 371, + 652 + ], + "spans": [ + { + "bbox": [ + 238, + 639, + 371, + 652 + ], + "type": "text", + "content": "Figure 25: PLM-8B Captioning." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 339, + 438, + 442, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 438, + 442, + 458 + ], + "spans": [ + { + "bbox": [ + 339, + 438, + 442, + 458 + ], + "type": "text", + "content": "Question: Describe the image in details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 339, + 459, + 442, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 459, + 442, + 628 + ], + "spans": [ + { + "bbox": [ + 339, + 459, + 442, + 628 + ], + "type": "text", + "content": "Answer: A city skyline with a large body of water in the foreground. The city skyline is made up of tall buildings, some of which are skyscrapers. The buildings are densely packed together and vary in height and color. In the foreground, there is a large body of water that appears to be a harbor or bay. There are many boats and ships in the water, some of which are docked at the shore while others are sailing or anchored in the middle of the water." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 140, + 511, + 261 + ], + "blocks": [ + { + "bbox": [ + 111, + 140, + 511, + 261 + ], + "lines": [ + { + "bbox": [ + 111, + 140, + 511, + 261 + ], + "spans": [ + { + "bbox": [ + 111, + 140, + 511, + 261 + ], + "type": "image", + "image_path": "4c4e50b1e9bf713125cd4b7e2fc1c66361692d068cfaa23f80097b03c2b8e462.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 294, + 263, + 328, + 275 + ], + "lines": [ + { + "bbox": [ + 294, + 263, + 328, + 275 + ], + "spans": [ + { + "bbox": [ + 294, + 263, + 328, + 275 + ], + "type": "text", + "content": "RDCap" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 278, + 511, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 278, + 511, + 309 + ], + "spans": [ + { + "bbox": [ + 111, + 278, + 511, + 309 + ], + "type": "text", + "content": "Question: Describe the subject's actions within the red rectangles, providing start and end frames ids and a brief description for each action in a dense caption format. For each item use the format [start, end]: [description] separated by a newline, where start and end are frame numbers between 0 and 31 in this 32 frame video." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 309, + 245, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 309, + 245, + 319 + ], + "spans": [ + { + "bbox": [ + 112, + 309, + 245, + 319 + ], + "type": "text", + "content": "Answer: Frame [0, 6]: Out of frame" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 319, + 511, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 319, + 511, + 339 + ], + "spans": [ + { + "bbox": [ + 112, + 319, + 511, + 339 + ], + "type": "text", + "content": "Frame [6, 15]: A woman is walking towards the camera with a child. She is holding the child's hand with her right hand and carrying a bag in her left hand" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 339, + 221, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 339, + 221, + 349 + ], + "spans": [ + { + "bbox": [ + 112, + 339, + 221, + 349 + ], + "type": "text", + "content": "Frame [16, 31]: Out of frame." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 298, + 370, + 324, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 370, + 324, + 382 + ], + "spans": [ + { + "bbox": [ + 298, + 370, + 324, + 382 + ], + "type": "text", + "content": "RCap" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 385, + 510, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 385, + 510, + 405 + ], + "spans": [ + { + "bbox": [ + 111, + 385, + 510, + 405 + ], + "type": "text", + "content": "Question: What is happening to the subject in the region highlighted by the red rectangle during frames [6, 15] in this 32 frame video?" 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 406, + 511, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 406, + 511, + 426 + ], + "spans": [ + { + "bbox": [ + 112, + 406, + 511, + 426 + ], + "type": "text", + "content": "Answer: a woman is walking on a muddy road with a child. she is holding the child's hand with her right hand and adjusting her saree with her left hand while walking" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 295, + 430, + 327, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 430, + 327, + 441 + ], + "spans": [ + { + "bbox": [ + 295, + 430, + 327, + 441 + ], + "type": "text", + "content": "RTLoc" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 444, + 511, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 444, + 511, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 444, + 511, + 485 + ], + "type": "text", + "content": "Question: Please provide the start and end frame of when 'an individual standing near a water body is drinking a from a water from a bottle' happens corresponding to the region marked by the blue rectangle. Use the format [start, end] where start and end are frame numbers between 0 and 31 in this 32 frame video. Answer: [23, 26]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 286, + 487, + 337, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 487, + 337, + 499 + ], + "spans": [ + { + "bbox": [ + 286, + 487, + 337, + 499 + ], + "type": "text", + "content": "Captioning" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 502, + 352, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 502, + 352, + 514 + ], + "spans": [ + { + "bbox": [ + 111, + 502, + 352, + 514 + ], + "type": "text", + "content": "Question: Please describe the video in detail, focusing on actions." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 514, + 511, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 514, + 511, + 563 + ], + "spans": [ + { + "bbox": [ + 111, + 514, + 511, + 563 + ], + "type": "text", + "content": "Answer: This video features a group of people walking around a muddy area with a small bridge in the background. The video is taken outdoors and the weather is sunny. The video starts with a man standing on the left side of the frame, facing the camera. He is wearing a white shirt, a black vest, black trousers and white shoes. He has a scarf around his neck. The camera pans to the right and shows a group of people walking around. The video is taken from a low angle and the camera is handheld." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 572, + 511, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 572, + 511, + 594 + ], + "spans": [ + { + "bbox": [ + 111, + 572, + 511, + 594 + ], + "type": "text", + "content": "The video ends with a man walking towards the left side of the frame. He is wearing a blue shirt, black trousers and brown shoes. He is holding a bottle in his right hand and is drinking from it." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 274, + 597, + 348, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 597, + 348, + 609 + ], + "spans": [ + { + "bbox": [ + 274, + 597, + 348, + 609 + ], + "type": "text", + "content": "Fine-grained QA" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 611, + 402, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 611, + 402, + 622 + ], + "spans": [ + { + "bbox": [ + 111, + 611, + 402, + 622 + ], + "type": "text", + "content": "Question: Please explain what the man in blue t-shirt is pointing at in the video." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 622, + 320, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 622, + 320, + 632 + ], + "spans": [ + { + "bbox": [ + 111, + 622, + 320, + 632 + ], + "type": "text", + "content": "Answer: The man in blue t-shirt is pointing at the bridge." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 639, + 466, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 639, + 466, + 652 + ], + "spans": [ + { + "bbox": [ + 141, + 639, + 466, + 652 + ], + "type": "text", + "content": "Figure 26: PLM-8B Video Region Captioning, Grounding and Fine-grained QA." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 164, + 511, + 222 + ], + "blocks": [ + { + "bbox": [ + 111, + 164, + 511, + 222 + ], + "lines": [ + { + "bbox": [ + 111, + 164, + 511, + 222 + ], + "spans": [ + { + "bbox": [ + 111, + 164, + 511, + 222 + ], + "type": "image", + "image_path": "4736a69fcb2b8863af5879c939b7d3dfa72f0cdb4c8b8682d4d8f4a18ab8fb9d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 225, + 418, + 236 + ], + "lines": [ + { + "bbox": [ + 111, + 225, + 418, + 236 + ], + "spans": [ + { + "bbox": [ + 111, + 225, + 418, + 236 + ], + "type": "text", + "content": "Question: Please give me a details of the video focusing on actions and interactions." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 111, + 236, + 510, + 296 + ], + "lines": [ + { + "bbox": [ + 111, + 236, + 510, + 296 + ], + "spans": [ + { + "bbox": [ + 111, + 236, + 510, + 296 + ], + "type": "text", + "content": "Answer: This video features a close up shot of a hand holding a plush toy mouse with a grey kitten standing on a white chair, reaching out and touching the toy with its paws. In the beginning of the video, the kitten is seen standing on the chair and reaching out with both paws to touch the toy mouse. The kitten then withdraws its paws and sits back on the chair. The hand holding the toy mouse is seen moving the toy closer to the kitten and then away from the kitten. The kitten then reaches out again and touches the toy with its paws. The hand then moves the toy away from the kitten again and the kitten looks to the left of the frame." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 111, + 304, + 275, + 316 + ], + "lines": [ + { + "bbox": [ + 111, + 304, + 275, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 304, + 275, + 316 + ], + "type": "text", + "content": "This video is set against a white background." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 227, + 323, + 383, + 335 + ], + "lines": [ + { + "bbox": [ + 227, + 323, + 383, + 335 + ], + "spans": [ + { + "bbox": [ + 227, + 323, + 383, + 335 + ], + "type": "text", + "content": "Figure 27: PLM-8B Video Captioning" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 527, + 511, + 586 + ], + "blocks": [ + { + "bbox": [ + 111, + 527, + 511, + 586 + ], + "lines": [ + { + "bbox": [ + 111, + 527, + 511, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 527, + 511, + 586 + ], + "type": "image", + "image_path": "aa12bcb8edff9ba1ae7ef304cae20debee36198b18474ddd9e63ef6b76c6cf17.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 587, + 348, + 598 + ], + "lines": [ + { + "bbox": [ + 111, + 587, + 348, + 598 + ], + "spans": [ + { + "bbox": [ + 111, + 587, + 348, + 598 + ], + "type": "text", + "content": "Question: What are the ingredients being thrown into the mixer?" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 111, + 598, + 253, + 609 + ], + "lines": [ + { + "bbox": [ + 111, + 598, + 253, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 598, + 253, + 609 + ], + "type": "text", + "content": "Answer: Carrots, apples, and bananas." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 241, + 616, + 368, + 628 + ], + "lines": [ + { + "bbox": [ + 241, + 616, + 368, + 628 + ], + "spans": [ + { + "bbox": [ + 241, + 616, + 368, + 628 + ], + "type": "text", + "content": "Figure 28: PLM-8B Video QA" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 280, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 280, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 280, + 85 + ], + "type": "text", + "content": "L Limitations and Future Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 162 + ], + "type": "text", + "content": "Our PLM models achieve strong performance against open-data baselines and proprietary models alike, however there is still room for improvement in both modeling and data. On the model front, we do not experiment extensively with long video modeling components (e.g., token compression, dynamic temporal resolution). As a result, our performance on long video benchmarks [92, 94, 96] is less competitive (see Table F). PLM is compatible with such newer advancements and can be incorporated in future work." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 506, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 244 + ], + "type": "text", + "content": "Additionally, our results are sensitive to the characteristics of the base LLM. We see especially low performance of PLM on benchmarks such as MMMU [37], MME [41] and Video-MME [75] (see Tables 3 and 4), where the strongest baselines often rely on LLMs that are more verbose, but also have a likely much larger language component (see the gap to proprietary models on some benchmarks). We also note that our model performs relatively poorly on our SGQA task (Table 5), targeting a mix of perception and knowledge based questions to smart glasses. Strong chatbot-focused systems like GPT-4o excel at tasks that go beyond core perception." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 248, + 506, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 506, + 283 + ], + "type": "text", + "content": "On the data front, our mix focuses squarely on visual perception — it does not include for example, multi-step reasoning, robotics or world-knowledge data. Despite these limitations, PLM contributes new capabilities and strong benchmark results, and set a new standard for fully reproducible VLMs." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 297, + 214, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 214, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 214, + 312 + ], + "type": "text", + "content": "M Broader Impact" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 322, + 506, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 506, + 410 + ], + "type": "text", + "content": "Our work aims to advance open and reproducible research in vision-language modeling by releasing models, data, and benchmarks that support open research. By not having any distillation from proprietary models, we hope to improve reproducible and transparent training and evaluation of VLM research. However, like all MLLMs, our Perception Language Model (PLM) may have some risks. Even by carefully selecting datasets and apply several mitigation (CSAM, NSFW, etc.), the model may still contain hidden biases or generate inappropriate or harmful content. We took steps to reduce these risks by teaching the model to refuse answering questions related to bias, harassment, or adult content. We also remove all samples containing any mention of human faces from all the datasets." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 415, + 505, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 471 + ], + "type": "text", + "content": "We also annotate and release a large-scale dataset for fine-grained video question answering and spatio-temporal grounding. This release has the potential to significantly advance research in image and video understanding. Making the dataset openly available allows others to reproduce our work and invites broader community involvement. 
This transparency supports safer and more accountable progress, helping researchers better understand and address potential biases or limitations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 474, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 506, + 498 + ], + "type": "text", + "content": "We believe that by openly sharing our models and data, while actively addressing ethical concerns, our work can contribute positively to vision-language research." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 110, + 89, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 115, + 89, + 505, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 89, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 115, + 89, + 505, + 111 + ], + "type": "text", + "content": "[1] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 120, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 120, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 115, + 120, + 505, + 152 + ], + "type": "text", + "content": "[2] Erfei Cui, Yinan He, Zheng Ma, Zhe Chen, Hao Tian, Weiyun Wang, Kunchang Li, Yi Wang, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, Yali Wang, Limin Wang, Yu Qiao, and Jifeng Dai. Sharegpt-4o: Comprehensive multimodal annotations with gpt-4o, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 161, + 505, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 161, + 505, + 193 + ], + "spans": [ + { + "bbox": [ + 115, + 161, + 505, + 193 + ], + "type": "text", + "content": "[3] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. Sharegpt4v: Improving large multi-modal models with better captions. In European Conference on Computer Vision, pages 370-387. Springer, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 201, + 505, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 201, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 115, + 201, + 505, + 224 + ], + "type": "text", + "content": "[4] Farre Miquel, Marafioti Andres, Tunstall Lewis, von Werra Leandro, Conghui He, Cuenca Pedro, and Wolf Thomas. Finevideo: behind the scenes, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 233, + 505, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 233, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 115, + 233, + 505, + 255 + ], + "type": "text", + "content": "[5] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 263, + 505, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 263, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 115, + 263, + 505, + 304 + ], + "type": "text", + "content": "[6] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, Li Yuan, Yu Qiao, Dahua Lin, Feng Zhao, and Jiaqi Wang. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 314, + 505, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 314, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 115, + 314, + 505, + 336 + ], + "type": "text", + "content": "[7] Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle-2: Faster inference of language models with dynamic draft trees, 2024b. URL https://arxiv.org/abs/2406.16858, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 345, + 505, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 345, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 115, + 345, + 505, + 368 + ], + "type": "text", + "content": "[8] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. Hero: Hierarchical encoder for video+ language omni-representation pre-training. arXiv preprint arXiv:2005.00200, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 376, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 376, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 115, + 376, + 505, + 407 + ], + "type": "text", + "content": "[9] Zhu Zhang, Zhou Zhao, Yang Zhao, Qi Wang, Huasheng Liu, and Lianli Gao. Where does it exist: Spatio-temporal video grounding for multi-form sentences. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10668-10677, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 417, + 505, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 417, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 110, + 417, + 505, + 449 + ], + "type": "text", + "content": "[10] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 110, + 457, + 505, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 457, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 110, + 457, + 505, + 559 + ], + "type": "text", + "content": "[11] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Jen Dumas, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weis, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 568, + 505, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 568, + 505, + 590 + ], + "spans": [ + { + "bbox": [ + 110, + 568, + 505, + 590 + ], + "type": "text", + "content": "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava- next: Improved reasoning,OCR,and world knowledge, January 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 110, + 599, + 505, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 599, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 110, + 599, + 505, + 631 + ], + "type": "text", + "content": "[13] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 640, + 505, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 640, + 505, + 681 + ], + "spans": [ + { + "bbox": [ + 110, + 640, + 505, + 681 + ], + "type": "text", + "content": "[14] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Anwen Hu, Haowei Liu, Qi Qian, Ji Zhang, and Fei Huang. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13040–13051, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 691, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 691, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 110, + 691, + 505, + 721 + ], + "type": "text", + "content": "[15] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 134 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 134 + ], + "type": "text", + "content": "[16] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. arXiv preprint arXiv:2204.14198, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 140, + 506, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 140, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 111, + 140, + 506, + 173 + ], + "type": "text", + "content": "[17] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pretraining for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 178, + 506, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 506, + 211 + ], + "type": "text", + "content": "[18] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 217, + 506, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 217, + 506, + 257 + ], + "spans": [ + { + "bbox": [ + 111, + 217, + 506, + 257 + ], + "type": "text", + "content": "[19] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Austin Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 265, + 506, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 265, + 506, + 296 + ], + "spans": [ + { + "bbox": [ + 111, + 265, + 506, + 296 + ], + "type": "text", + "content": "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 303, + 504, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 303, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 111, + 303, + 504, + 325 + ], + "type": "text", + "content": "[21] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. 
Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 331, + 504, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 331, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 331, + 504, + 354 + ], + "type": "text", + "content": "[22] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Khan. Videogpt+: Integrating image and video encoders for enhanced video understanding. arXiv preprint arXiv:2406.09418, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 359, + 504, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 359, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 111, + 359, + 504, + 381 + ], + "type": "text", + "content": "[23] Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 388, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 388, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 111, + 388, + 504, + 419 + ], + "type": "text", + "content": "[24] Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, Xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. Kangaroo: A powerful video-language model supporting long-context video input. arXiv preprint arXiv:2408.15542, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 426, + 504, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 504, + 457 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 504, + 457 + ], + "type": "text", + "content": "[25] Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, et al. 
Longvu: Spatiotemporal adaptive compression for long video-language understanding. arXiv preprint arXiv:2410.17434, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 464, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 464, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 504, + 495 + ], + "type": "text", + "content": "[26] Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 502, + 504, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 502, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 111, + 502, + 504, + 524 + ], + "type": "text", + "content": "[27] Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 530, + 506, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 530, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 111, + 530, + 506, + 561 + ], + "type": "text", + "content": "[28] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 568, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 568, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 568, + 506, + 609 + ], + "type": "text", + "content": "[29] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Tushar Nagarajan, Matt Smith, Shashank Jain, Chun-Fu Yeh, Prakash Murugesan, Peyman Heidari, Yue Liu, et al. 
Anymal: An efficient and scalable any-modality augmented language model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1314-1332, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 616, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 504, + 647 + ], + "type": "text", + "content": "[30] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 654, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 654, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 654, + 506, + 685 + ], + "type": "text", + "content": "[31] Rohan Choudhury, Guanglei Zhu, Sihan Liu, Koichiro Niinuma, Kris M Kitani, and László Jeni. Don't look twice: Faster video transformers with run-length tokenization. arXiv preprint arXiv:2411.05222, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 692, + 285, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 692, + 285, + 704 + ], + "spans": [ + { + "bbox": [ + 111, + 692, + 285, + 704 + ], + "type": "text", + "content": "[32] OpenAI. Gpt-4v(ision) system card, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 711, + 261, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 711, + 261, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 711, + 261, + 723 + ], + "type": "text", + "content": "[33] OpenAI. Gpt-4o system card, 2024." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 94 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 94 + ], + "type": "text", + "content": "[34] Gemini Team Google. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 99, + 505, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 505, + 121 + ], + "type": "text", + "content": "[35] Gemini Team Google. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 127, + 373, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 127, + 373, + 138 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 373, + 138 + ], + "type": "text", + "content": "[36] Anthropic. The claude 3 model family: Opus, sonnet, haiku. 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 144, + 504, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 504, + 185 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 504, + 185 + ], + "type": "text", + "content": "[37] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9556-9567, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 191, + 504, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 191, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 111, + 191, + 504, + 223 + ], + "type": "text", + "content": "[38] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 228, + 505, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 228, + 505, + 250 + ], + "spans": [ + { + "bbox": [ + 111, + 228, + 505, + 250 + ], + "type": "text", + "content": "[39] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge, 2022." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 255, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 255, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 255, + 505, + 297 + ], + "type": "text", + "content": "[40] Jeffrey P Bigham, Chandrika Jayant, Hanjie Ji, Greg Little, Andrew Miller, Robert C Miller, Robin Miller, Aubrey Tatarowicz, Brandyn White, Samual White, et al. Vizwiz: nearly real-time answers to visual questions. In Proceedings of the 23nd annual ACM symposium on User interface software and technology, pages 333-342, 2010." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 303, + 504, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 303, + 504, + 334 + ], + "spans": [ + { + "bbox": [ + 111, + 303, + 504, + 334 + ], + "type": "text", + "content": "[41] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 340, + 504, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 340, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 111, + 340, + 504, + 370 + ], + "type": "text", + "content": "[42] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 377, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 377, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 111, + 377, + 504, + 408 + ], + "type": "text", + "content": "[43] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 415, + 505, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 415, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 415, + 505, + 445 + ], + "type": "text", + "content": "[44] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. In European Conference on Computer Vision, pages 148-166, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 452, + 505, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 452, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 111, + 452, + 505, + 462 + ], + "type": "text", + "content": "[45] xai. RealworldQA benchmark. https://huggingface.co/datasets/xai-org/RealworldQA, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 468, + 505, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 468, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 111, + 468, + 505, + 498 + ], + "type": "text", + "content": "[46] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 506, + 505, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 506, + 505, + 526 + ], + "spans": [ + { + "bbox": [ + 111, + 506, + 505, + 526 + ], + "type": "text", + "content": "[47] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 533, + 504, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 533, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 111, + 533, + 504, + 563 + ], + "type": "text", + "content": "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 570, + 505, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 570, + 505, + 610 + ], + "spans": [ + { + "bbox": [ + 111, + 570, + 505, + 610 + ], + "type": "text", + "content": "[49] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 617, + 504, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 617, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 111, + 617, + 504, + 648 + ], + "type": "text", + "content": "[50] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. 
Nocaps: Novel object captioning at scale. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8948-8957, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 654, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 654, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 654, + 504, + 685 + ], + "type": "text", + "content": "[51] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2014." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 691, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 691, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 691, + 505, + 722 + ], + "type": "text", + "content": "[52] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "text", + "content": "[53] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. Docvqa: A dataset for vqa on document images. In 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2199-2208, 2021." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "text", + "content": "[54] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. arXiv preprint arXiv:2407.21038, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 138, + 504, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 138, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 111, + 138, + 504, + 159 + ], + "type": "text", + "content": "[55] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images, 2016." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 165, + 505, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 165, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 111, + 165, + 505, + 196 + ], + "type": "text", + "content": "[56] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C. V. Jawahar. Infographicvqa. In 2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 2582-2591, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 202, + 505, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 202, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 111, + 202, + 505, + 235 + ], + "type": "text", + "content": "[57] Yuliang Liu, Zhang Li, Mingxin Huang, Biao Yang, Wenwen Yu, Chunyuan Li, Xu-Cheng Yin, Cheng-Lin Liu, Lianwen Jin, and Xiang Bai. Ocrbench: on the hidden mystery ofOCR in large multimodal models. Science China Information Sciences, 67(12):220102, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 240, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 240, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 111, + 240, + 504, + 262 + ], + "type": "text", + "content": "[58] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 268, + 505, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 268, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 111, + 268, + 505, + 300 + ], + "type": "text", + "content": "[59] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. 
arXiv preprint arXiv:2406.18521, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 305, + 504, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 305, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 111, + 305, + 504, + 337 + ], + "type": "text", + "content": "[60] Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. From recognition to cognition: Visual commonsense reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6720-6731, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 342, + 505, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 342, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 111, + 342, + 505, + 393 + ], + "type": "text", + "content": "[61] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 2507-2521. Curran Associates, Inc., 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 400, + 505, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 400, + 505, + 432 + ], + "spans": [ + { + "bbox": [ + 111, + 400, + 505, + 432 + ], + "type": "text", + "content": "[62] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 438, + 504, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 438, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 111, + 438, + 504, + 469 + ], + "type": "text", + "content": "[63] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 475, + 504, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 475, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 111, + 475, + 504, + 497 + ], + "type": "text", + "content": "[64] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 503, + 505, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 503, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 111, + 503, + 505, + 534 + ], + "type": "text", + "content": "[65] Jierun Chen, Fangyun Wei, Jinjing Zhao, Sizhe Song, Bohuai Wu, Zhuoxuan Peng, S-H Gary Chan, and Hongyang Zhang. Revisiting referring expression comprehension evaluation in the era of large multimodal models. arXiv preprint arXiv:2406.16866, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 540, + 505, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 505, + 572 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 505, + 572 + ], + "type": "text", + "content": "[66] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. 
Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 578, + 505, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 578, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 111, + 578, + 505, + 620 + ], + "type": "text", + "content": "[67] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14375-14385, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 625, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 625, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 625, + 504, + 647 + ], + "type": "text", + "content": "[68] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 653, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 653, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 653, + 504, + 685 + ], + "type": "text", + "content": "[69] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 691, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 691, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 691, + 505, + 723 + ], + "type": "text", + "content": "[70] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[71] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "type": "text", + "content": "[72] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. Star: A benchmark for situated reasoning in real-world videos. In Thirty-fifth Conference on Neural Information Processing Systems (NeurIPS), 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 148, + 505, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 180 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 180 + ], + "type": "text", + "content": "[73] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. Tgif-qa: Toward spatiotemporal reasoning in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2758–2766, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 186, + 504, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 504, + 209 + ], + "type": "text", + "content": "[74] Jie Lei, Licheng Yu, Mohit Bansal, and Tamara L Berg. Tvqa: Localized, compositional video question answering. arXiv preprint arXiv:1809.01696, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 216, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 216, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 216, + 505, + 247 + ], + "type": "text", + "content": "[75] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal ILms in video analysis. arXiv preprint arXiv:2405.21075, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 254, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 254, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 254, + 505, + 285 + ], + "type": "text", + "content": "[76] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9127–9134, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 292, + 505, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 292, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 111, + 292, + 505, + 323 + ], + "type": "text", + "content": "[77] Munan Ning, Bin Zhu, Yujia Xie, Bin Lin, Jiaxi Cui, Lu Yuan, Dongdong Chen, and Li Yuan. Video-bench: A comprehensive benchmark and toolkit for evaluating video-based large language models. arXiv preprint arXiv:2311.16103, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 331, + 504, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 331, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 111, + 331, + 504, + 352 + ], + "type": "text", + "content": "[78] Jianrui Zhang, Mu Cai, and Yong Jae Lee. Vinoground: Scrutinizing Imms over dense temporal reasoning with short videos. arXiv preprint arXiv:2410.02763, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 358, + 505, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 505, + 389 + ], + "type": "text", + "content": "[79] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 396, + 505, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 396, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 111, + 396, + 505, + 418 + ], + "type": "text", + "content": "[80] Daniel Cores, Michael Dorkenwald, Manuel Mucientes, Cees GM Snoek, and Yuki M Asano. Tvbench: Redesigning video-language evaluation. arXiv preprint arXiv:2410.07752, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 425, + 505, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 425, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 111, + 425, + 505, + 455 + ], + "type": "text", + "content": "[81] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5288-5296, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 463, + 505, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 463, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 463, + 505, + 495 + ], + "type": "text", + "content": "[82] David Chen and William B Dolan. Collecting highly parallel data for paraphrase evaluation. In Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies, pages 190-200, 2011." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 501, + 505, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 501, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 111, + 501, + 505, + 523 + ], + "type": "text", + "content": "[83] Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI conference on artificial intelligence, volume 32, 2018." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 529, + 505, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 529, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 111, + 529, + 505, + 561 + ], + "type": "text", + "content": "[84] Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang, and William Yang Wang. Vatex: A large-scale, high-quality multilingual dataset for video-and-language research. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4581-4591, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 568, + 505, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 568, + 505, + 590 + ], + "spans": [ + { + "bbox": [ + 111, + 568, + 505, + 590 + ], + "type": "text", + "content": "[85] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 596, + 505, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 596, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 111, + 596, + 505, + 617 + ], + "type": "text", + "content": "[86] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 624, + 505, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 624, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 111, + 624, + 505, + 655 + ], + "type": "text", + "content": "[87] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D Manning. Auroracap: Efficient, performant video detailed captioning and a new benchmark. 
arXiv preprint arXiv:2410.03051, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 662, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 505, + 693 + ], + "type": "text", + "content": "[88] Yuxuan Wang, Yueqian Wang, Dongyan Zhao, Cihang Xie, and Zilong Zheng. Videohallucer: Evaluating intrinsic and extrinsic hallucinations in large video-language models. arXiv preprint arXiv:2406.16338, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 700, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 700, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 700, + 505, + 722 + ], + "type": "text", + "content": "[89] Jiacheng Zhang, Yang Jiao, Shaoxiang Chen, Jingjing Chen, and Yu-Gang Jiang. Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597, 2024." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "text", + "content": "[90] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. 
Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 111, + 504, + 142 + ], + "type": "text", + "content": "[91] Ruchit Rawal, Khalid Saifullah, Miquel Farré, Ronen Basri, David Jacobs, Gowthami Somepalli, and Tom Goldstein. Cinepile: A long video question answering dataset and benchmark. arXiv preprint arXiv:2405.08813, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 149, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 149, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 149, + 505, + 182 + ], + "type": "text", + "content": "[92] Weihan Wang, Zehai He, Wenyi Hong, Yean Cheng, Xiaohan Zhang, Ji Qi, Xiaotao Gu, Shiyu Huang, Bin Xu, Yuxiao Dong, et al. Lvbench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 189, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 189, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 189, + 505, + 220 + ], + "type": "text", + "content": "[93] Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Movieqa: Understanding stories in movies through question-answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4631–4640, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 228, + 507, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 228, + 507, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 228, + 507, + 258 + ], + "type": "text", + "content": "[94] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. 
Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37:28828-28857, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 266, + 505, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 266, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 111, + 266, + 505, + 308 + ], + "type": "text", + "content": "[95] Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18221-18232, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 314, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 314, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 314, + 505, + 346 + ], + "type": "text", + "content": "[96] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 353, + 505, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 353, + 505, + 384 + ], + "spans": [ + { + "bbox": [ + 111, + 353, + 505, + 384 + ], + "type": "text", + "content": "[97] Guo Chen, Yicheng Liu, Yifei Huang, Yuping He, Baoqi Pei, Jilan Xu, Yali Wang, Tong Lu, and Limin Wang. Cg-bench: Clue-grounded question answering benchmark for long video understanding. arXiv preprint arXiv:2412.12075, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 392, + 505, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 392, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 111, + 392, + 505, + 423 + ], + "type": "text", + "content": "[98] Orr Zohar, Xiaohan Wang, Yann Dubois, Nikhil Mehta, Tong Xiao, Philippe Hansen-Estruch, Licheng Yu, Xiaofang Wang, Felix Juefei-Xu, Ning Zhang, et al. Apollo: An exploration of video understanding in large multimodal models. arXiv preprint arXiv:2412.10360, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 430, + 505, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 430, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 111, + 430, + 505, + 461 + ], + "type": "text", + "content": "[99] Mu Cai, Reuben Tan, Jianrui Zhang, Bocheng Zou, Kai Zhang, Feng Yao, Fangrui Zhu, Jing Gu, Yiwu Zhong, Yuzhang Shang, et al. Temporalbench: Benchmarking fine-grained temporal understanding for multimodal video models. arXiv preprint arXiv:2410.10818, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 468, + 505, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 505, + 501 + ], + "type": "text", + "content": "[100] Ziyao Shangguan, Chuhan Li, Yuxuan Ding, Yanan Zheng, Yilun Zhao, Tesca Fitzgerald, and Arman Cohan. Tomato: Assessing visual temporal reasoning capabilities in multimodal foundation models. arXiv preprint arXiv:2410.23266, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 507, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 505, + 540 + ], + "type": "text", + "content": "[101] Wenyi Hong, Yean Cheng, Zhuoyi Yang, Weihan Wang, Lefan Wang, Xiaotao Gu, Shiyu Huang, Yuxiao Dong, and Jie Tang. 
Motionbench: Benchmarking and improving fine-grained video motion understanding for vision language models. arXiv preprint arXiv:2501.02955, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 546, + 505, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 546, + 505, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 577 + ], + "type": "text", + "content": "[102] Yuanxin Liu, Shicheng Li, Yi Liu, Yuxiang Wang, Shuhuai Ren, Lei Li, Sishuo Chen, Xu Sun, and Lu Hou. Tempcompass: Do video llms really understand videos? arXiv preprint arXiv:2403.00476, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 584, + 505, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 505, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 616 + ], + "type": "text", + "content": "[103] Mohammadreza Salehi, Jae Sung Park, Tanush Yadav, Aditya Kusupati, Ranjay Krishna, Yejin Choi, Hannaneh Hajishirzi, and Ali Farhadi. Actionatlas: A videoqa benchmark for domain-specialized action recognition. arXiv preprint arXiv:2410.05774, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 624, + 505, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 505, + 655 + ], + "type": "text", + "content": "[104] Daniel Bolya, Po-Yao Huang, Peize Sun, Jang Hyun Cho, Andrea Madotto, Chen Wei, Tengyu Ma, Jiale Zhi, Jathushan Rajasegaran, Hanoona Rasheed, et al. Perception encoder: The best visual embeddings are not at the output of the network. arXiv preprint arXiv:2504.13181, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "type": "text", + "content": "[105] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 700, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 723 + ], + "type": "text", + "content": "[106] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "type": "text", + "content": "[107] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 
Intervl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 122, + 265, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 122, + 265, + 134 + ], + "spans": [ + { + "bbox": [ + 106, + 122, + 265, + 134 + ], + "type": "text", + "content": "[108] Brandon Castellano. PySceneDetect." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 143, + 505, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 143, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 106, + 143, + 505, + 184 + ], + "type": "text", + "content": "[109] Ahmed Masry, Do Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Findings of the Association for Computational Linguistics: ACL 2022, pages 2263-2279, Dublin, Ireland, May 2022. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 193, + 505, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 193, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 505, + 224 + ], + "type": "text", + "content": "[110] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 232, + 505, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 232, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 505, + 254 + ], + "type": "text", + "content": "[111] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 
Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 262, + 505, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 262, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 505, + 293 + ], + "type": "text", + "content": "[112] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 302, + 505, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 302, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 505, + 333 + ], + "type": "text", + "content": "[113] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 342, + 505, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 342, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 342, + 505, + 373 + ], + "type": "text", + "content": "[114] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 382, + 505, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 382, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 106, + 382, + 505, + 533 + ], + "type": "text", + "content": "[115] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonio Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, Miguel Martin, Tushar Nagarajan, Ilija Radosavovic, Santhosh Kumar Ramakrishnan, Fiona Ryan, Jayant Sharma, Michael Wray, Mengmeng Xu, Eric Zhongcong Xu, Chen Zhao, Siddhant Bansal, Dhruv Batra, Vincent Cartillier, Sean Crane, Tien Do, Morrie Doulaty, Akshay Erapalli, Christoph Feichtenhofer, Adriano Fragomeni, Qichen Fu, Abraham Gebreselasie, Cristina Gonzalez, James Hillis, Xuhua Huang, Yifei Huang, Wenqi Jia, Weslie Khoo, Jachym Kolar, Satwik Kottur, Anurag Kumar, Federico Landini, Chao Li, Yanghao Li, Zhenqiang Li, Karttikeya Mangalam, Raghava Modhugu, Jonathan Munro, Tullie Murrell, Takumi Nishiyasu, Will Price, Paola Ruiz Puentes, Merey Ramazanova, Leda Sari, Kiran Somasundaram, Audrey Southerland, Yusuke Sugano, Ruijie Tao, Minh Vo, Yuchen Wang, Xindi Wu, Takuma Yagi, Ziwei Zhao, Yunyi Zhu, Pablo Arbelaez, David Crandall, Dima Damen, Giovanni Maria Farinella, Christian Fuegen, Bernard Ghanem, Vamsi Krishna Ithapu, C. V. Jawahar, Hanbyul Joo, Kris Kitani, Haizhou Li, Richard Newcombe, Aude Oliva, Hyun Soo Park, James M. Rehg, Yoichi Sato, Jianbo Shi, Mike Zheng Shou, Antonio Torralba, Lorenzo Torresani, Mingfei Yan, and Jitendra Malik. Ego4d: Around the world in 3,000 hours of egocentric video. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 541, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 541, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 541, + 505, + 722 + ], + "type": "text", + "content": "[116] Kristen Grauman, Andrew Westbury, Lorenzo Torresani, Kris Kitani, Jitendra Malik, Triantafyllos Afouras, Kumar Ashutosh, Vijay Baiyya, Siddhant Bansal, Bikram Boote, Eugene Byrne, Zachary Chavis, Joya Chen, Feng Cheng, Fu-Jen Chu, Sean Crane, Avijit Dasgupta, Jing Dong, María Escobar, Cristhian Forigua, Abraham Kahsay Gebreselasie, Sanjay Haresh, Jing Huang, Md Mohaiminul Islam, Suyog Dutt Jain, Rawal Khirodkar, Devansh Kukreja, Kevin J Liang, Jia-Wei Liu, Sagnik Majumder, Yongsen Mao, Miguel Martin, Effrosyni Mavroudi, Tushar Nagarajan, Francesco Ragusa, Santhosh K. Ramakrishnan, Luigi Seminara, Arjun Somayazulu, Yale Song, Shan Su, Zihui Xue, Edward Zhang, Jinxu Zhang, Angela Castillo, Changan Chen, Xinzhu Fu, Ryosuke Furuta, Cristina Gonzalez, Prince Gupta, Jiabo Hu, Yifei Huang, Yiming Huang, Weslie Khoo, Anush Kumar, Robert Kuo, Sach Lakhavani, Miao Liu, Mingjing Luo, Zhengyi Luo, Brighid Meredith, Austin Miller, Oluwatuminu Oguntola, Xiaqing Pan, Penny Peng, Shraman Pramanick, Merey Ramazanova, Fiona Ryan, Wei Shan, Kiran Somasundaram, Chenan Song, Audrey Southerland, Masatoshi Tateno, Huiyu Wang, Yuchen Wang, Takuma Yagi, Mingfei Yan, Xitong Yang, Zecheng Yu, Shengxin Cindy Zha, Chen Zhao, Ziwei Zhao, Zhifan Zhu, Jeff Zhuo, Pablo Arbeláez, Gedas Bertasius, David J. Crandall, Dima Damen, Jakob Julian Engel, Giovanni Maria Farinella, Antonino Furnari, Bernard Ghanem, Judy Hoffman, C. V. Jawahar, Richard A. Newcombe, Hyun Soo Park, James M. Rehg, Yoichi Sato, Manolis Savva, Jianbo Shi, Mike Zheng Shou, and Michael Wray. Ego-exo4d: Understanding skilled human activity from first- and third-person perspectives. 
IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19383-19400, 2023." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "[117] Yansong Tang, Dajun Wang, Zhenyu Xu, Jingjing Liu, Xiaoyong Wang, Xing Gao, Jinhui Tang, and Dong Wu. Coin: A large-scale dataset for comprehensive instructional video analysis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 506, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 506, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 506, + 142 + ], + "type": "text", + "content": "[118] Dimitri Zhukov, Jean-Baptiste Alayrac, Chen Sun, Ivan Laptev, Cordelia Schmid, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 148, + 506, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 148, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 107, + 148, + 506, + 179 + ], + "type": "text", + "content": "[119] Thong Thanh Nguyen, Zhiyuan Hu, Xiaobao Wu, Cong-Duy T Nguyen, See-Kiong Ng, and Anh Tuan Luu. Encoding and controlling global semantics for long-form video question answering. arXiv preprint arXiv:2405.19723, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "type": "text", + "content": "[120] Kexin Yi, Chuang Gan, Yunzhu Li, Pushmeet Kohli, Jiajun Wu, Antonio Torralba, and Joshua B Tenenbaum. Clevrer: Collision events for video representation and reasoning. arXiv preprint arXiv:1910.01442, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 223, + 506, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 223, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 107, + 223, + 506, + 255 + ], + "type": "text", + "content": "[121] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 261, + 506, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 261, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 107, + 261, + 506, + 302 + ], + "type": "text", + "content": "[122] Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, et al. 
The\" something something\" video database for learning and evaluating visual common sense. In Proceedings of the IEEE international conference on computer vision, pages 5842-5850, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 308, + 504, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 308, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 308, + 504, + 340 + ], + "type": "text", + "content": "[123] Paul Voigtlaender, Soravit Changpinyo, Jordi Pont-Tuset, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with video localized narratives. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2461-2471, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 346, + 504, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 504, + 378 + ], + "type": "text", + "content": "[124] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 384, + 506, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 506, + 415 + ], + "type": "text", + "content": "[125] Bernard Ghanem Fabian Caba Heilbron, Victor Escorcia and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 422, + 506, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 422, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 107, + 422, + 506, + 463 + ], + "type": "text", + "content": "[126] Soichiro Fujita, Tsutomu Hirao, Hidetakam Kamigaito, Manabu Okumura, and Masaaki Nagata. Soda: Story oriented dense video captioning evaluation framework. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VI 16, pages 517-531. Springer, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "type": "text", + "content": "[127] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 11:635-651, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 497, + 504, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 497, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 107, + 497, + 504, + 518 + ], + "type": "text", + "content": "[128] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 525, + 504, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 525, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 107, + 525, + 504, + 556 + ], + "type": "text", + "content": "[129] Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 563, + 504, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 563, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 107, + 563, + 504, + 583 + ], + "type": "text", + "content": "[130] Tri Dao. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "type": "text", + "content": "[131] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Alan Lerer. Automatic differentiation in pytorch, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 618, + 400, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 618, + 400, + 629 + ], + "spans": [ + { + "bbox": [ + 107, + 618, + 400, + 629 + ], + "type": "text", + "content": "[132] Montalvo Pablo and Wightman Ross. PDF association dataset (pdfa), 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 635, + 410, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 635, + 410, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 635, + 410, + 647 + ], + "type": "text", + "content": "[133] Montalvo Pablo and Wightman Ross. Industry documents library (idl), 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 653, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 653, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 107, + 653, + 504, + 684 + ], + "type": "text", + "content": "[134] Lei Li, Yuqi Wang, Runxin Xu, Peiyi Wang, Xiachong Feng, Lingpeng Kong, and Qi Liu. 
Multimodal arxiv: A dataset for improving scientific comprehension of large vision-language models. arXiv preprint arXiv:2403.00231, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 691, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 504, + 723 + ], + "type": "text", + "content": "[135] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 114 + ], + "type": "text", + "content": "[136] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, Tom Duerig, and Vittorio Ferrari. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. IJCV, 2020." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 121, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 152 + ], + "type": "text", + "content": "[137] Rowan Zellers, Ximing Lu, Jack Hessel, Youngjae Yu, Jae Sung Park, Jize Cao, Ali Farhadi, and Yejin Choi. Merlot: Multimodal neural script knowledge models. Advances in neural information processing systems, 34:23634-23651, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 159, + 505, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 159, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 159, + 505, + 199 + ], + "type": "text", + "content": "[138] Mathew Monfort, SouYoung Jin, Alexander Liu, David Harwath, Rogerio Feris, James Glass, and Aude Oliva. Spoken moments: Learning joint audio-visual representations from video descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14871–14881, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 206, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 206, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 106, + 206, + 505, + 247 + ], + "type": "text", + "content": "[139] Gunnar A Sigurdsson, Gúl Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 510-526. Springer, 2016." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 255, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 255, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 505, + 285 + ], + "type": "text", + "content": "[140] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pages 5803-5812, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 293, + 505, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 293, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 324 + ], + "type": "text", + "content": "[141] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with " + }, + { + "bbox": [ + 106, + 293, + 505, + 324 + ], + "type": "inline_equation", + "content": "2.8\\mathrm{m}" + }, + { + "bbox": [ + 106, + 293, + 505, + 324 + ], + "type": "text", + "content": " challenging questions. arXiv preprint arXiv:2502.13124, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 331, + 505, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 331, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 505, + 352 + ], + "type": "text", + "content": "[142] Kushal Kafle, Scott Cohen, Brian Price, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In CVPR, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 358, + 505, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 358, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 505, + 389 + ], + "type": "text", + "content": "[143] Nitesh Methani, Pritha Ganguly, Mitesh M. 
Khapra, and Pratyush Kumar. Plotqa: Reasoning over scientific plots. In The IEEE Winter Conference on Applications of Computer Vision (WACV), March 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "type": "text", + "content": "[144] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 425, + 505, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 425, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 505, + 456 + ], + "type": "text", + "content": "[145] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 947-952, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 462, + 505, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 462, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 106, + 462, + 505, + 484 + ], + "type": "text", + "content": "[146] Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 491, + 505, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 491, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 106, + 491, + 505, + 512 + ], + "type": "text", + "content": "[147] Samira Ebrahimi Kahou, Vincent Michalski, Adam Atkinson, Akos Kadar, Adam Trischler, and Yoshua Bengio. Figureqa: An annotated figure dataset for visual reasoning, 2018." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 518, + 505, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 518, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 505, + 560 + ], + "type": "text", + "content": "[148] Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. The hateful memes challenge: Detecting hate speech in multimodal memes. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 2611-2624. Curran Associates, Inc., 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 567, + 505, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 567, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 505, + 588 + ], + "type": "text", + "content": "[149] Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning, 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 595, + 505, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 595, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 595, + 505, + 635 + ], + "type": "text", + "content": "[150] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. In The 35th Conference on Neural Information Processing Systems (NeurIPS) Track on Datasets and Benchmarks, 2021." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 643, + 505, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 643, + 505, + 664 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 664 + ], + "type": "text", + "content": "[151] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 671, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 671, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 505, + 722 + ], + "type": "text", + "content": "[152] Yilun Zhao, Chen Zhao, Linyong Nan, Zhenting Qi, Wenlin Zhang, Xiangru Tang, Boyu Mi, and Dragomir Radev. Robut: A systematic study of table qa robustness against human-annotated adversarial perturbations. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6064–6081, Toronto, Canada, July 2023. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[153] Hugo Laurençon, Léo Tronchon, and Victor Sanh. Unlocking the conversion of web screenshots into html code with the websight dataset, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "type": "text", + "content": "[154] Yuke Zhu, Oliver Groth, Michael Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. In IEEE Conference on Computer Vision and Pattern Recognition, 2016." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 129, + 505, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 129, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 505, + 150 + ], + "type": "text", + "content": "[155] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In AAAI, 2019." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 157, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 157, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 107, + 157, + 504, + 178 + ], + "type": "text", + "content": "[156] Jonas Belouadi, Anne Lauscher, and Steffen Eger. Automatikz: Text-guided synthesis of scientific vector graphics with tikz, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 186, + 506, + 217 + ], + "type": "text", + "content": "[157] Mengye Ren, Ryan Kiros, and Richard Zemel. Exploring models and data for image question answering. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 223, + 506, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 223, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 107, + 223, + 506, + 264 + ], + "type": "text", + "content": "[158] Jason Obeid and Enamul Hoque. Chart-to-text: Generating natural language descriptions for charts by adapting the transformer model. In Brian Davis, Yvette Graham, John Kelleher, and Yaji Sripada, editors, Proceedings of the 13th International Conference on Natural Language Generation, pages 138-147, Dublin, Ireland, December 2020. Association for Computational Linguistics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "type": "text", + "content": "[159] Benny J. Tang, Angie Boggust, and Arvind Satyanarayan. 
Vistext: A benchmark for semantically rich chart captioning. In The Annual Meeting of the Association for Computational Linguistics (ACL), 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 300, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 300, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 505, + 361 + ], + "type": "text", + "content": "[160] Zhiyu Chen, Wenhu Chen, Charese Smiley, Sameena Shah, Iana Borova, Dylan Langdon, Reema Moussa, Matt Beane, Ting-Hao Huang, Bryan Routledge, and William Yang Wang. Finqa: A dataset of numerical reasoning over financial data. In Marie-Francine Moens, Xuanjing Huang, Lucia Specia, and Scott Wen-tau Yih, editors, Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3697-3711, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 368, + 506, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 368, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 107, + 368, + 506, + 399 + ], + "type": "text", + "content": "[161] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marcal Rusinol, C.V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 4290-4300, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 406, + 506, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 406, + 506, + 458 + ], + "spans": [ + { + "bbox": [ + 107, + 406, + 506, + 458 + ], + "type": "text", + "content": "[162] Fengbin Zhu, Wenqiang Lei, Youcheng Huang, Chao Wang, Shuo Zhang, Jiancheng Lv, Fuli Feng, and Tat-Seng Chua. Tat-qa: A question answering benchmark on a hybrid of tabular and textual content in finance. 
In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3277–3287, Online, August 2021. Association for Computational Linguistics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 464, + 261, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 464, + 261, + 475 + ], + "spans": [ + { + "bbox": [ + 107, + 464, + 261, + 475 + ], + "type": "text", + "content": "[163] Chris Wendler. Renderedtext, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 482, + 504, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 482, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 107, + 482, + 504, + 513 + ], + "type": "text", + "content": "[164] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. Raven: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 520, + 504, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 520, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 107, + 520, + 504, + 541 + ], + "type": "text", + "content": "[165] Urs-Viktor Marti and H. Bunke. Theiam-database:An english sentence database for offline handwriting recognition.International Journal on Document Analysis and Recognition,5:39-46,11 2002." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 548, + 504, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 548, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 107, + 548, + 504, + 579 + ], + "type": "text", + "content": "[166] Pan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. 
Dynamic prompt learning via policy gradient for semi-structured mathematical reasoning. In International Conference on Learning Representations (ICLR), 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 586, + 504, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 504, + 608 + ], + "type": "text", + "content": "[167] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 614, + 506, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 506, + 656 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 506, + 656 + ], + "type": "text", + "content": "[168] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, UIST '21, page 498-510, New York, NY, USA, 2021. Association for Computing Machinery." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 662, + 410, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 410, + 673 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 410, + 673 + ], + "type": "text", + "content": "[169] Fangyu Liu, Guy Emerson, and Nigel Collier. Visual spatial reasoning, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 681, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 681, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 681, + 504, + 721 + ], + "type": "text", + "content": "[170] Aniruddha Kembhavi, Minjoon Seo, Dustin Schwenk, Jonghyun Choi, Ali Farhadi, and Hannaneh Hajishirzi. Are you smarter than a sixth grader? 
textbook question answering for multimodal machine comprehension. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5376-5384, 2017." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[171] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. Visualmrc: Machine reading comprehension on document images. In AAAI, 2021." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 100, + 505, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 505, + 124 + ], + "type": "text", + "content": "[172] Jason Lau, Soumya Gayen, Asma Ben Abacha, and Dina Demner-Fushman. A dataset of clinically generated visual questions and answers about radiology images. Scientific Data, 5:180251, 11 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 129, + 505, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 129, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 505, + 181 + ], + "type": "text", + "content": "[173] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, Jian-Guang Lou, and Dongmei Zhang. 
Hitab: A hierarchical table dataset for question answering and natural language generation. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio, editors, Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1094-1110, Dublin, Ireland, May 2022. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 186, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 228 + ], + "type": "text", + "content": "[174] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. In The Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021), 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 236, + 265, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 236, + 265, + 247 + ], + "spans": [ + { + "bbox": [ + 107, + 236, + 265, + 247 + ], + "type": "text", + "content": "[175] Diagram image to text dataset, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 253, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 253, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 504, + 275 + ], + "type": "text", + "content": "[176] Bo Li, Yuanhan Zhang, Liangyu Chen, Jinghao Wang, Fanyi Pu, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Mimic-it: Multi-modal in-context instruction tuning, 2023." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 281, + 505, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 281, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 505, + 323 + ], + "type": "text", + "content": "[177] Yilun Zhao, Yunxiang Li, Chenying Li, and Rui Zhang. Multihiertt: Numerical reasoning over multi hierarchical tabular and textual data. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6588-6600, Dublin, Ireland, May 2022. Association for Computational Linguistics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 329, + 505, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 329, + 505, + 371 + ], + "spans": [ + { + "bbox": [ + 106, + 329, + 505, + 371 + ], + "type": "text", + "content": "[178] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Anna Korhonen, David Traum, and Lluis Márquez, editors, Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6418-6428, Florence, Italy, July 2019. Association for Computational Linguistics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 377, + 505, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 377, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 377, + 505, + 419 + ], + "type": "text", + "content": "[179] Harsh Jhamtani et al. Learning to describe differences between pairs of similar images. In Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii, editors, Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4024-4034, Brussels, Belgium, October-November 2018. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 426, + 505, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 457 + ], + "type": "text", + "content": "[180] Haoping Bai, Shancong Mou, Tatiana Likhomanenko, Ramazan Gokberk Cinbis, Oncel Tuzel, Ping Huang, Jiulong Shan, Jianjun Shi, and Meng Cao. Vision datasets: A benchmark for vision-based industrial inspection. arXiv preprint arXiv:2306.07890, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 464, + 505, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 464, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 505, + 496 + ], + "type": "text", + "content": "[181] Tanmay Gupta, Dustin Schwenk, Ali Farhadi, Derek Hoiem, and Aniruddha Kembhavi. Imagine this! scripts to compositions to videos. In Proceedings of the European conference on computer vision (ECCV), pages 598-613, 2018." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 502, + 504, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 502, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 107, + 502, + 504, + 524 + ], + "type": "text", + "content": "[182] Benno Krojer, Vaibhav Adlakha, Vibhav Vineet, Yash Goyal, Edoardo Ponti, and Siva Reddy. Image retrieval from contextual descriptions. arXiv preprint arXiv:2203.15867, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 530, + 505, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 530, + 505, + 562 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 505, + 562 + ], + "type": "text", + "content": "[183] Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1383-1391, 2015." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 568, + 505, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 568, + 505, + 600 + ], + "spans": [ + { + "bbox": [ + 106, + 568, + 505, + 600 + ], + "type": "text", + "content": "[184] Yingshan Chang, Mridu Narang, Hisami Suzuki, Guihong Cao, Jianfeng Gao, and Yonatan Bisk. Webqa: Multihop and multimodal qa. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16495-16504, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 606, + 505, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 606, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 107, + 606, + 505, + 628 + ], + "type": "text", + "content": "[185] Maxwell Forbes, Christine Kaeser-Chen, Piyush Sharma, and Serge Belongie. Neural naturalist: Generating fine-grained image comparisons. arXiv preprint arXiv:1909.04101, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 634, + 505, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 634, + 505, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 505, + 666 + ], + "type": "text", + "content": "[186] Hareesh Ravi, Kushal Kafle, Scott Cohen, Jonathan Brandt, and Mubbasir Kapadia. Aesop: Abstract encoding of stories, objects, and pictures. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2052-2063, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 672, + 505, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 672, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 106, + 672, + 505, + 695 + ], + "type": "text", + "content": "[187] Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Nazli Ikizler-Cinbis. Recipeqa: A challenge dataset for multimodal comprehension of cooking recipes. arXiv preprint arXiv:1809.00812, 2018." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 700, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 505, + 723 + ], + "type": "text", + "content": "[188] Dong Huk Park, Trevor Darrell, and Anna Rohrbach. Robust change captioning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4624-4633, 2019." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "[189] Rumeysa Bodur, Erhan Gundogdu, Binod Bhattarai, Tae-Kyun Kim, Michael Donoser, and Loris Bazzani. iedit: Localised text-guided image editing with weak supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7426-7435, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 505, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 505, + 162 + ], + "type": "text", + "content": "[190] Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. 
In Chengqing Zong and Michael Strube, editors, Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1470–1480, Beijing, China, July 2015. Association for Computational Linguistics." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 167, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 167, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 107, + 167, + 504, + 190 + ], + "type": "text", + "content": "[191] Ye Yuan, Xiao Liu, Wondimu Dikubab, Hui Liu, Zhilong Ji, Zhongqin Wu, and Xiang Bai. Syntax-aware network for handwritten mathematical expression recognition. arXiv preprint arXiv:2203.01601, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 504, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 504, + 226 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 504, + 226 + ], + "type": "text", + "content": "[192] Yasumasa Onoe, Sunayana Rane, Zachary Berger, Yonatan Bitton, Jaemin Cho, Roopal Garg, Alexander Ku, Zarana Parekh, Jordi Pont-Tuset, Garrett Tanzer, et al. Docci: Descriptions of connected and contrasting images. In European Conference on Computer Vision, pages 291-309. Springer, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 232, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 232, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 506, + 274 + ], + "type": "text", + "content": "[193] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating clip-style models on dense captions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26700-26709, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 279, + 506, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 279, + 506, + 311 + ], + "spans": [ + { + "bbox": [ + 107, + 279, + 506, + 311 + ], + "type": "text", + "content": "[194] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen-tau Yih, et al. Altogether: Image captioning via re-aligning alt-text. arXiv preprint arXiv:2410.17251, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 316, + 504, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 316, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 107, + 316, + 504, + 348 + ], + "type": "text", + "content": "[195] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 354, + 506, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 354, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 354, + 506, + 395 + ], + "type": "text", + "content": "[196] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE international conference on computer vision, pages 2641–2649, 2015." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 402, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 402, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 107, + 402, + 504, + 434 + ], + "type": "text", + "content": "[197] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. 
Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 787-798, 2014." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 438, + 504, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 438, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 107, + 438, + 504, + 471 + ], + "type": "text", + "content": "[198] Xiyao Wang, Yuhang Zhou, Xiaoyu Liu, Hongjin Lu, Yuancheng Xu, Feihong He, Jaehong Yoon, Taixi Lu, Gedas Bertasius, Mohit Bansal, Huaxiu Yao, and Furong Huang. Mementos: A comprehensive benchmark for multimodal large language model reasoning over image sequences, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 476, + 506, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 476, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 107, + 476, + 506, + 508 + ], + "type": "text", + "content": "[199] Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23056-23065, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 514, + 506, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 514, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 107, + 514, + 506, + 545 + ], + "type": "text", + "content": "[200] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 551, + 504, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 551, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 107, + 551, + 504, + 573 + ], + "type": "text", + "content": "[201] Nazneen Rajani, Lewis Tunstall, Edward Beeching, Nathan Lambert, Alexander M. Rush, and Thomas Wolf. No robots. https://huggingface.co/datasets/HuggingFaceH4/no Robots, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 578, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 506, + 609 + ], + "type": "text", + "content": "[202] Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel-Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. Mathqa: Towards interpretable math word problem solving with operation-based formalisms. arXiv preprint arXiv:1905.13319, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 616, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 506, + 647 + ], + "type": "text", + "content": "[203] Chunting Zhou, Pengfei Liu, Puxin Xu, Srinivasan Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. Lima: Less is more for alignment. Advances in Neural Information Processing Systems, 36:55006-55021, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 653, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 653, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 107, + 653, + 504, + 685 + ], + "type": "text", + "content": "[204] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. 
arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 691, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 504, + 722 + ], + "type": "text", + "content": "[205] Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. Finetuned language models are zero-shot learners. In International Conference on Learning Representations." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "text", + "content": "[206] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 504, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 504, + 144 + ], + "type": "text", + "content": "[207] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuuchen Lin. 
Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 152, + 504, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 152, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 107, + 152, + 504, + 182 + ], + "type": "text", + "content": "[208] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 190, + 504, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 190, + 504, + 221 + ], + "spans": [ + { + "bbox": [ + 107, + 190, + 504, + 221 + ], + "type": "text", + "content": "[209] Kaichen Zhang, Bo Li, Peiyuan Zhang, Fanyi Pu, Joshua Adrian Cahyono, Kairui Hu, Shuai Liu, Yuhan Zhang, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Reality check on the evaluation of large multimodal models, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 229, + 504, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 229, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 504, + 270 + ], + "type": "text", + "content": "[210] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multimodality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 279, + 504, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 279, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 107, + 279, + 504, + 310 + ], + "type": "text", + "content": "[211] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 318, + 504, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 318, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 107, + 318, + 504, + 339 + ], + "type": "text", + "content": "[212] Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Zehuan Yuan, Ping Luo, and Huchuan Lu. Universal instance perception as object discovery and retrieval. In CVPR, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 347, + 504, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 347, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 107, + 347, + 504, + 379 + ], + "type": "text", + "content": "[213] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 386, + 504, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 386, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 107, + 386, + 504, + 418 + ], + "type": "text", + "content": "[214] Jang Hyun Cho, Boris Ivanovic, Yulong Cao, Edward Schmerling, Yue Wang, Xinshuo Weng, Boyi Li, Yurong You, Philipp Kraehenbuehl, Yan Wang, and Marco Pavone. Language-image models with 3d understanding. 
In The Thirteenth International Conference on Learning Representations, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 426, + 505, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 505, + 467 + ], + "type": "text", + "content": "[215] Yale Song, Eugene Byrne, Tushar Nagarajan, Huiyu Wang, Miguel Martin, and Lorenzo Torresani. Ego4d goal-step: Toward hierarchical understanding of procedural activities. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 38863-38886. Curran Associates, Inc., 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 475, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 475, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 107, + 475, + 505, + 506 + ], + "type": "text", + "content": "[216] Triantafyllos Afouras, Effrosyni Mavroudi, Tushar Nagarajan, Huiyu Wang, and Lorenzo Torresani. HT-step: Aligning instructional articles with how-to videos. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 514, + 504, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 514, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 107, + 514, + 504, + 545 + ], + "type": "text", + "content": "[217] Effrosyni Mavroudi, Triantafyllos Afouras, and Lorenzo Torresani. Learning to ground instructional articles in videos through narrations. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 15201-15213, October 2023." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 553, + 504, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 553, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 107, + 553, + 504, + 585 + ], + "type": "text", + "content": "[218] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 593, + 504, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 593, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 107, + 593, + 504, + 624 + ], + "type": "text", + "content": "[219] Hyolim Kang, Jinwoo Kim, Taehyun Kim, and Seon Joo Kim. Uboco: Unsupervised boundary contrastive learning for generic event boundary detection. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20041-20050, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 632, + 504, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 632, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 107, + 632, + 504, + 663 + ], + "type": "text", + "content": "[220] Zexing Du, Xue Wang, Guoqing Zhou, and Qing Wang. Fast and unsupervised action boundary detection for action segmentation. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3313-3322, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 671, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 671, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 107, + 671, + 504, + 693 + ], + "type": "text", + "content": "[221] PySceneDetect: Video Cut Detection and Analysis Tool, https://github.com/breakthrough/pyscenedetect." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "type": "text", + "content": "[222] J. S. Chung and A. Zisserman. Out of time: automated lip sync in the wild. In Workshop on Multi-view Lip-reading, ACCV, 2016." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 586 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "text", + "content": "[223] Zi-Yi Dou, Xitong Yang, Tushar Nagarajan, Huiyu Wang, Jing Huang, Nanyun Peng, Kris Kitani, and Fu-Jen Chu. Unlocking exocentric video-language data for egocentric video representation learning. ArXiv, abs/2408.03567, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 504, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 504, + 132 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 504, + 132 + ], + "type": "text", + "content": "[224] Dandan Shan, Jiaqi Geng, Michelle Shu, and David Fouhey. Understanding human hands in contact at internet scale. In CVPR, 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 138, + 504, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 138, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 107, + 138, + 504, + 160 + ], + "type": "text", + "content": "[225] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In Proceedings of the 37th International Conference on Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 167, + 504, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 504, + 207 + ], + "type": "text", + "content": "[226] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 215, + 504, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 215, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 107, + 215, + 504, + 246 + ], + "type": "text", + "content": "[227] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. Perceiver io: A general architecture for structured inputs & outputs. ICLR, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 252, + 504, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 252, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 107, + 252, + 504, + 283 + ], + "type": "text", + "content": "[228] F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung. 
A benchmark dataset and evaluation methodology for video object segmentation. In Computer Vision and Pattern Recognition, 2016." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 290, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 290, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 107, + 290, + 505, + 320 + ], + "type": "text", + "content": "[229] Sergi Caelles, Jordi Pont-Tuset, Federico Perazzi, Alberto Montes, Kevis-Kokitsi Maninis, and Luc Van Gool. The 2019 davis challenge on vos: Unsupervised multi-object segmentation. arXiv:1905.00737, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 328, + 504, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 328, + 504, + 359 + ], + "spans": [ + { + "bbox": [ + 107, + 328, + 504, + 359 + ], + "type": "text", + "content": "[230] Yan Yan, Chenliang Xu, Dawen Cai, and Jason J Corso. Weakly supervised actor-action segmentation via robust multi-task ranking. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1298-1307, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 365, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 365, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 107, + 365, + 504, + 388 + ], + "type": "text", + "content": "[231] Ujjal Kr Dutta, Mehrtash Harandi, and Chellu Chandra Sekhar. Unsupervised deep metric learning via orthogonality based probabilistic loss. IEEE Transactions on Artificial Intelligence, 1(1):74-84, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 394, + 505, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 394, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 107, + 394, + 505, + 425 + ], + "type": "text", + "content": "[232] Luowei Zhou, Yannis Kalantidis, Xinlei Chen, Jason J Corso, and Marcus Rohrbach. Grounded video description. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6578-6587, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 431, + 505, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 431, + 505, + 463 + ], + "spans": [ + { + "bbox": [ + 107, + 431, + 505, + 463 + ], + "type": "text", + "content": "[233] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 469, + 505, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 505, + 501 + ], + "type": "text", + "content": "[234] Zongheng Tang, Yue Liao, Si Liu, Guanbin Li, Xiaojie Jin, Hongxu Jiang, Qian Yu, and Dong Xu. Human-centric spatio-temporal video grounding with visual transformers. IEEE Transactions on Circuits and Systems for Video Technology, 32(12):8238-8249, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 507, + 505, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 507, + 505, + 539 + ], + "spans": [ + { + "bbox": [ + 107, + 507, + 505, + 539 + ], + "type": "text", + "content": "[235] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2694-2703, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 544, + 505, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 505, + 586 + ], + "type": "text", + "content": "[236] Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16375-16387, 2022." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_content_list.json b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..54fbd71131ac19b40d534a79a6fc26eb48a377eb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_content_list.json @@ -0,0 +1,5672 @@ +[ + { + "type": "text", + "text": "Perception Encoder: The best visual embeddings are not at the output of the network", + "text_level": 1, + "bbox": [ + 138, + 98, + 831, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Daniel Bolya $^{1,\\ast}$ , Po-Yao Huang $^{1,\\ast}$ , Peize Sun $^{1,\\ast}$ , Jang Hyun Cho $^{1,2,\\ast,\\dagger}$ , Andrea Madotto $^{1,\\ast}$ , Chen Wei $^{1}$ , Tengyu Ma $^{1}$ , Jiale Zhi 
$^{1}$ , Jathushan Rajasegaran $^{1}$ , Hanoona Rasheed $^{3,\\dagger}$ , Junke Wang $^{4,\\dagger}$ , Marco Monteiro $^{1}$ , Hu Xu $^{1}$ , Shiyu Dong $^{5}$ , Nikhila Ravi $^{1}$ , Daniel Li $^{1}$ , Piotr Dólár $^{1}$ , Christoph Feichtenhofer $^{1}$", + "bbox": [ + 137, + 154, + 859, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Meta FAIR, $^{2}$ UT Austin, $^{3}$ MBZUAI, $^{4}$ Fudan University, $^{5}$ Meta Reality Labs *Joint first author, †Work done during internships at Meta", + "bbox": [ + 138, + 207, + 699, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce Perception Encoder (PE), a state-of-the-art vision encoder for image and video understanding trained via simple vision-language learning. Traditionally, vision encoders have relied on a variety of pretraining objectives, each tailored to specific downstream tasks such as classification, captioning, or localization. Surprisingly, after scaling our carefully tuned image pretraining recipe and refining with our robust video data engine, we find that contrastive vision-language training alone can produce strong, general embeddings for all of these downstream tasks. There is only one caveat: these embeddings are hidden within the intermediate layers of the network. To draw them out, we introduce two alignment methods: language alignment for multimodal language modeling, and spatial alignment for dense prediction. Together, our PE family of models achieves best-in-class results on a wide variety of tasks, including (1) zero-shot image and video classification and retrieval, simultaneously obtaining 86.6 average zero-shot ImageNet robustness and 76.9 zero-shot Kinetics-400 video classification; (2) document, image, and video Q&A, enabling 94.6 DocVQA, 80.9 InfographicVQA, and 82.7 PerceptionTest with an 8B LLM; and (3) spatial tasks such as detection, tracking, and depth estimation, setting a new COCO state-of-the-art of 66.0 box mAP. 
To foster further research, we release our models, code, and novel dataset of synthetically and human-annotated videos.", + "bbox": [ + 135, + 256, + 859, + 484 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code: https://github.com/facebookresearch/perception_models", + "bbox": [ + 138, + 502, + 571, + 516 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dataset: https://ai.meta.com/datasets/pe-video/", + "bbox": [ + 140, + 518, + 472, + 531 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meta", + "bbox": [ + 784, + 518, + 859, + 532 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 109, + 571, + 271, + 589 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "For the last decade in computer vision, pretrained vision encoders have been the core building block for most applications requiring perception. From million-scale ImageNet [26] pretrained convolutional networks [42, 61, 81, 124, 131] to billion-scale web-pretrained transformers [19, 24, 29, 33, 54, 102, 130, 152, 158], the dominant strategy in vision has consistently been to adapt large-scale pretrained encoders to downstream tasks.", + "bbox": [ + 107, + 601, + 887, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "There are many pretraining objectives today, each with distinct characteristics and each yielding representations better suited for specific tasks: vision-language contrastive losses [106, 160] learn a global vision and language embedding well-suited for zero-shot classification and retrieval as well as provide vision-language alignment for open-world [69, 94] and generative tasks [108, 114]; captioning losses [37, 137] learn to predict image descriptions using a language decoder, which transfers well to downstream multimodal language model (MLLM) tasks; and spatially self-supervised losses [44, 98] learn dense spatial correspondences without language supervision, making them useful 
for tasks requiring precise localization like object detection.", + "bbox": [ + 107, + 669, + 887, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Many works are now attempting to combine two or more of these techniques in different ways [19, 34, 35, 37, 45, 90, 110, 158]. While many have been successful, the complexity of these strategies grows exponentially with number of use cases, which can make scaling difficult. There has not yet been shown a single, simple, and easily scalable pretraining technique that can learn state-of-the-art features for all downstream tasks.", + "bbox": [ + 107, + 782, + 887, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work we discover that global vision-language contrastive learning alone can be one such approach. After building a state-of-the-art contrastive model for image and video, we found a surprising result: inside the model were specific features aligned to OCR, VQA, grounding, detection, depth estimation, and tracking. Compared to the state-of-the-art models with captioning [37] and spatially self-supervised [98] pretraining, our", + "bbox": [ + 107, + 849, + 888, + 912 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13181v2 [cs.CV] 28 Apr 2025", + "bbox": [ + 22, + 260, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 936, + 503, + 948 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg", + "image_caption": [ + "Figure 1 Perception Encoder (PE) is a family of large-scale vision encoder models with state-of-the-art performance on a large variety of vision tasks. 
By using a robust contrastive pretraining recipe and finetuning on synthetically aligned videos, PE not only outperforms all existing models on classification and retrieval (§2), but it also internally produces strong, general features that scale for downstream tasks (§3). PE unlocks the ability for large-scale contrastive pretraining to transfer to downstream tasks with alignment tuning to capitalize on those general features (§4, §5)." + ], + "image_footnote": [], + "bbox": [ + 112, + 78, + 883, + 209 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "contrastive encoder has specific layers that, when used as frozen features, matches or exceeds the performance of the other two pretraining techniques on tasks they should be the best at. The only problem is—these features exist at different layers for each task. By exploiting this phenomenon with alignment tuning, we show it is possible to align these features to the end of the network in order to create state-of-the-art encoders for downstream MLLM and spatial tasks—all following the same easily scalable contrastive pretraining.", + "bbox": [ + 107, + 310, + 883, + 387 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We begin by building $\\mathrm{PE}_{\\mathrm{core}}$ (Fig. 1, left), a large-scale contrastively pretrained model with state-of-the-art zero-shot performance on both images and video ( $\\S 2$ ). To accomplish this, we first focus on developing a strong image-only contrastive pretraining recipe to extract general knowledge from billion-scale image-text data. Keeping the data and training FLOPs fixed, this recipe significantly improves upon vanilla CLIP in both absolute performance and robustness ( $\\S 2.1$ ). We then use the resulting model as a frame-based encoder to develop a video data engine for generating well-aligned video captions. 
Finetuning on this synthetic video-text data substantially improves performance on both image and video classification and retrieval tasks ( $\\S 2.2$ ). Motivated by this success, we release a large portion of the data used to train the engine: PE Video Dataset (PVD), consisting of 1M diverse videos with 120K human-refined annotations ( $\\S 2.3$ ). Finally, we scale our robust image pretraining and well-aligned video finetuning strategy to 2B parameters to produce $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ ( $\\S 2.4$ ), a single unified encoder that outperforms SigLIP2 [138] on zero-shot image tasks and InternVideo2 [146] on most zero-shot video tasks. We further transfer this power to smaller model scales through distillation.", + "bbox": [ + 107, + 393, + 883, + 575 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With the strongest image and video recognition model in hand, we shift our focus to downstream tasks. Remarkably, despite being pretrained with CLIP loss, we find that the intermediate layers of $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ can rival AIMv2-3B [37] on language tasks and DINOv2-g [98] on spatial tasks, both of which among the strongest pretrained models in their respective domains. Upon investigation, we attribute this capability to our robust image pretraining strategy, which appears to have unlocked the potential of contrastive pretraining to scale effectively for downstream tasks (§3). However, a challenge remains: the model does not naturally output these features, keeping them hidden internally. To address this, we introduce two alignment tuning methods (Fig. 1, right) to extract these strong, general features.", + "bbox": [ + 107, + 582, + 883, + 704 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, in §4, we investigate the most effective technique to align features to the end of the network by adapting to a large language model. 
This language alignment enables us to construct $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ , which individually outperforms all other popular vision encoders for MLLM tasks. Moreover, when paired with our Perception Language Model (PLM) [21], the combination rivals the latest state-of-the-art MLLMs, like InternVL3 [168]", + "bbox": [ + 107, + 710, + 883, + 772 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Second, in §5, we identify a dichotomy in the layers optimal for spatial tasks. By visualizing the features and pinpointing the explicit reason for this dichotomy, we develop a straightforward spatial alignment approach: distilling from the model's own frozen features to achieve most of the alignment, complemented by a novel use of SAM 2 [111] for spatial correspondence distillation to refine the process. The resulting $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ not only outperforms other popular models in depth estimation, tracking, and semantic segmentation, but also sets a new absolute state-of-the-art on COCO [76] detection with a much simpler decoder.", + "bbox": [ + 107, + 777, + 883, + 869 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With this family of checkpoints, Perception Encoder unlocks the potential to scale one simple pretraining method to solve many downstream vision tasks. We are releasing our models, code, and PE Video Dataset.", + "bbox": [ + 107, + 876, + 883, + 906 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Perception Encoder: Core", + "text_level": 1, + "bbox": [ + 109, + 80, + 406, + 99 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To build Perception Encoder (PE), we start by training a large-scale, robust, and highly performant vision-language contrastive model for image and video. 
We have two objectives: first, to enhance the scalability and data efficiency of contrastive training; and second, to create a unified model effective on both image and video.", + "bbox": [ + 107, + 107, + 887, + 152 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These goals are somewhat conflicting: image-text data is plentiful and training on images is efficient, but video-text data is scarce and video training is expensive. Thus, we decouple image and video training into two stages. We first develop a strong image pretraining recipe (§2.1) with several regularization techniques to create a robust starting point. Then we use the resulting image model as a frame encoder to develop a video data engine (§2.2) supported by our novel human-refined video-text dataset (§2.3) to generate aligned captions for video clips. Finally, we finetune the image encoder on the resulting aligned video data (§2.4). Using our data engine design, this short finetuning step substantially improves both image and video performance.", + "bbox": [ + 107, + 160, + 887, + 268 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Robust Image Pretraining", + "text_level": 1, + "bbox": [ + 109, + 279, + 366, + 295 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the first stage of pretraining, we want to learn as much visual information as possible from a large set of image-text data. Notably, a unique quirk of contrastive training is the loss for a given sample depends on the other samples in the batch. Because each batch is different, there is potential to learn new information every time an example is sampled, even if that sample has been seen before. Thus, we find contrastive learning to benefit from a long training schedule. To exploit this, we design our pretraining recipe with high regularization, stability, and training efficiency in mind.", + "bbox": [ + 107, + 303, + 888, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Setup. (Fig. 
2.1) We track our changes on a vanilla CLIP model using an OpenCLIP [51] ViT-L/14 model at 224 resolution as a baseline. We keep the training budget fixed to around 1T GFLOPs (i.e., a ZFLOP), and train on a fixed 2.3B image-text dataset curated using the MetaCLIP [152] text-only curation pipeline. For the baseline, we use a global batch size of $32\\mathrm{K}$ , class token, AdamW [83], and train for 12B samples seen. To assess the generality of the information learned during pretraining, we report not only zero-shot ImageNet val [26] results but also the average performance across a range of robustness metrics, including ImageNet val [26], ImageNet v2 [112], ObjectNet [4], ImageNet Adversarial [47], ImageNet Rendition [46], and ImageNet Sketch [143]. As observed with other pure CLIP models [33, 106, 152], the average robustness metric performance of this vanilla recipe is much lower than ImageNet val alone.", + "bbox": [ + 109, + 405, + 439, + 723 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg", + "image_caption": [ + "Figure 2 Robust Image Pretraining. We tune our pretraining recipe (§2.1) to maximize performance on a fixed set of data, starting with an OpenCLIP [51] ViT-L/14 model. We report cumulative zero-shot classification results for each modification. The inner bars show robustness evaluation, calculated as the average of 6 robustness benchmarks [4, 26, 46, 47, 112, 143], and the outer bars show ImageNet val [26] alone. Several changes significantly improve robustness, indicating that ImageNet val scales more with data, while robustness can scale with refined training techniques." + ], + "image_footnote": [], + "bbox": [ + 470, + 393, + 893, + 584 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Progressive Resolution. (Fig. 2.2) To enable longer training, we first improve training efficiency. 
As shown in many works [70, 71, 79, 131, 136], vision encoders work well with a progressively increasing resolution schedule. Thus, we halve the training FLOPs while maintaining performance by evenly splitting the baseline 12B-sample run into 98, 154, and 224 resolution stages, with 4B samples per stage.", + "bbox": [ + 107, + 734, + 885, + 796 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Increasing Batch Size. (Fig. 2.3) We use the extra budget to double the batch size from $32\\mathrm{K}$ to $64\\mathrm{K}$ , increasing the total samples seen from 12B to 24B. Larger batch size means a higher likelihood for there to be a non-trivially novel pair of samples, i.e., hard negatives. This is akin to increasing the \"task difficulty\" of CLIP and improves ImageNet val by $+0.6\\%$ and robustness by double of that, $+1.1\\%$ .", + "bbox": [ + 107, + 806, + 885, + 868 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LAMB Optimizer. (Fig. 2.4) We switch from AdamW to LAMB [156], which is known to stabilize large batch training. More importantly, LAMB allows us to train stably with a higher learning rate of $2 \\times 10^{-3}$ compared", + "bbox": [ + 107, + 878, + 887, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "to the original $5 \\times 10^{-4}$ . We observe that starting with a high learning rate is important to allow the model to adapt to different resolutions. These factors combine for $+0.4\\%$ on ImageNet val and $+0.7\\%$ on robustness.", + "bbox": [ + 109, + 80, + 887, + 111 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Increasing Final Resolution. (Fig. 2.5) A classic finding is that parameters and resolution should be scaled together [36, 131]. Thus, we add a fourth 336 resolution stage at the end of training. 
To keep the training FLOPs the same, we adjust the training schedule to 10B samples at 98 resolution, 8B at 154, 4B at 224, and 2B at 336. While ImageNet val only increases by $+0.5\\%$ , robustness improves threefold, rising by $+1.4\\%$ .", + "bbox": [ + 109, + 122, + 887, + 184 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RoPE. (Fig. 2.6) We add 2D RoPE [127] to each attention layer to improve extrapolation, keeping the original position embedding. 2D RoPE only improves ImageNet val by $+0.3\\%$ but enhances robustness by $+0.9\\%$ .", + "bbox": [ + 109, + 195, + 887, + 226 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Attention Pooling. (Fig. 2.7) We follow [160] in constructing the CLIP embedding using an attention probing transformer block. Surprisingly, we found keeping the class token as an input to this block is important for small model performance. Together, this improves ImageNet val by $+0.3\\%$ and robustness by $+0.9\\%$ .", + "bbox": [ + 109, + 237, + 887, + 284 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tuned Data Augmentation. (Fig. 2.8) Despite training on billions of samples, we find data augmentation still important—especially for transfer to unlikely scenarios like in ObjectNet [4]. We add heavy random cropping, brightness/saturation jitter, and horizontal flip. Random cropping encourages using the entire caption, as not everything is in frame. Jitter helps low-light settings and documents. Horizontal flip improves natural images and does not hurt OCR (see §2.5). These improve robustness by $+0.7\\%$ , notably, ObjectNet by $+2.4\\%$ .", + "bbox": [ + 109, + 295, + 887, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mask Regularization. (Fig. 2.9) As regularization, we want the model to produce the same features if some patches are not visible. However, passing the CLIP gradients through masked images may negatively alter behavior on unmasked images. 
Thus, we convert MaskFeat [147] into a regularization loss by duplicating and masking 1/16th of the batch. At the output, the masked tokens are aligned to their unmasked counterparts by maximizing cosine similarity. Care is taken to ensure that the CLIP and masked gradients are disjoint.", + "bbox": [ + 109, + 382, + 887, + 459 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Scaling Behavior. (Figs. 3 and 4) In Fig. 3, we show the performance of our recipe (Fig. 2.9) vs. the original CLIP recipe (Fig. 2.1) across S/14, B/14, and L/14 models. For each benchmark, our recipe scales around the same rate or better than the original CLIP recipe. On some difficult datasets like ObjectNet [4] and ImageNet Adversarial [47], our recipe shows distinctly better scaling. This indicates that the improvements in performance were not at the cost of scalability, meaning we can further benefit from scaling the model size.", + "bbox": [ + 109, + 469, + 887, + 546 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg", + "image_caption": [ + "Figure 3 Scaling Behavior (Model Size). Results before and after our recipe changes (Fig. 2) for S/14, B/14, and L/14 models. Our recipe improves scaling for difficult metrics like ObjectNet [4] and ImageNet Adeversarial [47]." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 558, + 241, + 636 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 245, + 558, + 367, + 636 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 558, + 496, + 636 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 558, + 622, + 636 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 558, + 751, + 636 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 558, + 880, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Fig. 4, we additionally show the performance of our recipe vs. the original CLIP recipe across L/14 models trained with 120K steps (one-third schedule), 240K steps (two-thirds schedule), and 360K steps (full ablation schedule). All models are their own training runs with full learning rate annealing and the progressive resolution schedule adjusted proportionally. We see nearly linear trends for our recipe on most datasets. 
This suggests we can train longer for more performance, even at L scale and with 24B samples seen already.", + "bbox": [ + 109, + 688, + 887, + 765 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg", + "image_caption": [ + "Figure 4 Scaling Behavior (Training Steps). Results before and after our recipe changes for an L/14 model trained with 120K, 240K, and 360K steps, adjusting the learning rate and progressive resolution schedules accordingly. Despite our recipe being much stronger than the original, there is still room for further improvement by training longer." + ], + "image_footnote": [], + "bbox": [ + 117, + 777, + 241, + 854 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 245, + 777, + 369, + 854 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 777, + 496, + 854 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 777, + 625, + 854 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 777, + 753, + 854 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 777, + 880, + 854 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 3 
+ }, + { + "type": "text", + "text": "2.2 Bootstrapping a Video Data Engine with Perception Encoder", + "text_level": 1, + "bbox": [ + 109, + 79, + 653, + 97 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With a robust image pretraining recipe settled and its scaling behavior confirmed, our next step is to extend the image-only encoder to accommodate video and build a unified image-video model. Unlike web-scale image-text data, which comes in many cases with human-generated descriptive alt-text information, videos with aligned language annotation are inherently scarce. High-quality human-annotated captions for videos are even rarer. This scarcity presents a unique and significant challenge in training encoders capable of effectively processing video inputs.", + "bbox": [ + 109, + 104, + 357, + 359 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg", + "image_caption": [ + "Figure 5 Video Data Engine. To create aligned video-text data for contrastive training, we use a PE-based video captioner [21] to generate a holistic video caption and an image-level captioner [82] on sampled frames. We then provide those captions as well as the original video metadata to text-only LLM [82] to synthesize a single short, aligned caption optimal for contrastive training." + ], + "image_footnote": [], + "bbox": [ + 383, + 128, + 885, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inspired by the recent success of image data engines [58, 64, 96, 111, 151], we extend this concept to develop a robust video data engine that generates well-aligned synthetic captions for a diverse set of videos, facilitating the training of a video encoder. This innovative approach represents the first large-scale exploration of its kind. 
In the following sections, we introduce the process of building our video data engine.", + "bbox": [ + 109, + 359, + 885, + 421 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To bootstrap our contrastive video finetuning, we focus on synthesizing video captions. We build our data engine in three stages: (1) we create a strong baseline video captioner, which we call the Perception Language Model (PLM), described in [21]; (2) we add additional high quality video data with human-refined captions to further enhance the captioner's quality; (3) we refine and summarize the generated video captions with an LLM to construct a large video dataset to use for the contrastive video finetuning of our Perception Encoder.", + "bbox": [ + 109, + 428, + 887, + 506 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Phase 1: Base Video Captioner (PLM). We build our data engine on an early version of PLM [21], a multimodal large language model with PE as the vision encoder and Llama [82] as the language decoder. We train PLM on a large-scale collection of open-access image and video datasets [21]. In total, the training dataset consists of 64.7M images and videos covering natural images, charts, documents, exocentric and egocentric videos.", + "bbox": [ + 109, + 515, + 887, + 578 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Phase 2: PLM + Refined Data. To further boost captioning performance, we collect a set of 265K videos (105K from PVD which we release, see §2.3), caption them with our base PLM model, and ask human raters to refine the captions1. We then fine-tune our base PLM model with this data, significantly improving captioning quality (see Tab. 1).", + "bbox": [ + 109, + 588, + 473, + 695 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/754240b0fdfb6b195dadcbc2f6c7fd2fc9c772c307dec34acb8e1c27fc16616c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CaptionerAuroraCap [13]VCG Diverse [87]VCG Bench [86] Score
ScoreAccScoreAcc
PLM2.251.93.165.134.3
PLM + Human-Refined Data3.471.13.679.435.2
", + "bbox": [ + 498, + 588, + 877, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1 Video Captioning. We use an early version of PLM-8B [21], consisting of our image-only PE encoder and a Llama decoder, for captioning. Adding human-refined data greatly boosts captioning performance (higher is better).", + "bbox": [ + 493, + 637, + 887, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Phase 3: LLM Summarization. We synthesize the final aligned video captions by incorporating the PLM video captions, Llama 3.2 [82] image-only frame captions, and the existing video metadata of video titles and descriptions (Fig. 5). Similar to image alt-text, video metadata contains knowledge often not covered by the image and video captioning models. Thus, combining the two leads to more comprehensive captions. We summarize video captions, frame captions, and video metadata together using the Llama 3.3 70B model to provide the final captions. The prompt used to generate the summary can be found in Appendix A.1.", + "bbox": [ + 109, + 705, + 885, + 797 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Using the Engine. Finally, we use the resulting data engine bootstrapped with an image-only checkpoint of PE to generate well-aligned, information-dense captions for a diverse set of 22M videos for contrastive finetuning.", + "bbox": [ + 109, + 808, + 887, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training with Recaptioned Videos. Our goal is to develop a unified image and video encoder. 
To encode videos using our existing image encoder, we uniformly sample $N = 8$ frames from video clips and extract frame-level", + "bbox": [ + 109, + 849, + 887, + 881 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1The annotators are instructed to remove, correct, and add information from the captions.", + "bbox": [ + 127, + 888, + 684, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "embeddings with the image encoder. We then apply average pooling over these frame embeddings to obtain video embeddings, which are used for contrastive learning with encoded video captions by the text encoder. Despite being extremely simple, we find this technique surprisingly effective in producing a strong joint image-video encoder. We share this finding with previous studies [19, 84], which note that simple average pooling outperforms more complex pooling strategies like attention-based compression for video.", + "bbox": [ + 109, + 80, + 887, + 157 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Ablations. In Tab. 2, we conduct an ablation study on the components of the video data engine by finetuning an intermediate image-only checkpoint on 17M of the 22M videos recaptioned by our video data engine. The results show that the video data engine significantly enhances zero-shot classification and retrieval performance for both image and video benchmarks, compared to the image-only baseline encoder (first row). 
Notably, using the video data engine's video-level and frame-level captions provides significant improvements over relying solely on metadata such as video title and description (second row), highlighting the importance of building a robust video data engine to compensate for noise in web videos.", + "bbox": [ + 109, + 167, + 433, + 409 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/98121d3bed5a35310ba152c9861be31ac69dd8a0d1f018191a8a6603f9f86662.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TitleDescriptionVideo CaptionFrame CaptionAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet vial [26]ImageNet vial [12]ObjectNet IV Classes [4]MS-COCO mrt→img [76]MS-COCO mrt→mrt [76]Average VideoKinetics 400 [55]Kinetics 400 [55]MSR-VTT mrt→vid [153]MSR-VTT mrt→mrt [153]48.1
72.683.377.885.849.466.850.969.768.438.027.3
75.483.278.287.147.366.056.074.173.539.037.3
78.283.578.486.856.074.360.973.873.447.648.8
✓*78.183.779.087.754.173.060.975.475.146.746.5
78.283.779.087.554.673.261.675.875.547.448.1
", + "bbox": [ + 460, + 172, + 880, + 282 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2 Video Data Engine Ablation. We ablate our video data engine in Fig. 5 by finetuning on an in-development image-only version of PE by averaging the frame embeddings to create a single video CLIP embedding. Video captions are generated by PLM trained with or without * human-refined data (see §2.3). Frame captions are generated by the Llama 3.2 vision model. Each component helps on different metrics, overall culminating in a huge boost to both image and video zero-shot performance.", + "bbox": [ + 455, + 286, + 885, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our analysis reveals that the most critical components are the video metadata and PLM's video caption; however, all components are necessary to achieve peak performance in our video data engine.", + "bbox": [ + 109, + 410, + 887, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Fig. 6, we investigate the impact of scaling recaptioned video data on a later checkpoint of the same image-only model as in Fig. 2. Notably, scaling synthetic video data demonstrates consistent improvement in both image and video benchmarks. Full results of this scaling experiment can be found in the Appendix 19.", + "bbox": [ + 109, + 446, + 887, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the top row, scaling synthetic video data consistently improves performance on image benchmarks, with monotonic improvements of $+1.1\\%$ in ObjectNet and $+1.6\\%$ in ImageNet Adversarial. ImageNet val and ImageNet v2 have smaller gains, with accuracy increases of $0.3\\%$ to $0.5\\%$ , plateauing at $\\sim 7\\mathrm{M}$ samples. 
We also observe a significant boost to zero-shot retrieval (here, COCO [76]) of $+3.8\\%$ to $+4.1\\%$ top-1 recall.", + "bbox": [ + 109, + 500, + 887, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The video tasks listed in the bottom row demonstrate a consistent story. We observe a significant jump in performance between none and 3M videos across all video classification tasks, indicating that there is a domain gap for image-only models that hinders their ability to perform well on video out of the box. Further scaling synthetic video data leads to substantial performance gains in both video classification and retrieval. Video classification accuracy improves consistently by $+5.6\\%$ to $+11.7\\%$ without plateauing, while video retrieval shows significant improvements of $+7.7$ to $+15.3$ top-1 recall.", + "bbox": [ + 109, + 568, + 887, + 659 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg", + "image_caption": [ + "Figure 6 Video Data Scaling. Finetuning on videos recaptioned by the PE video data engine from 0M (baseline image-only model) to 17M samples consistently improves both image and video performance, both classification and retrieval." + ], + "image_footnote": [], + "bbox": [ + 122, + 679, + 875, + 862 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg", + "image_caption": [ + "Category: Food Preparation", + "Figure 7 PE Video Dataset Example. A sample from PVD, our released video-text dataset. Initial captions are generated by our video captioning model and then refined by human annotators. Annotators are instructed to add details and remove model hallucination. 
In this example, the model hallucination \"a spoon\" is removed; and more details such as \"glass bowl\" and the action \"scraping\" are added. See Appendix Fig. 18 for more." + ], + "image_footnote": [], + "bbox": [ + 117, + 119, + 879, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Model caption: \"The video shows a person making a cake. The person is seen mixing ingredients in a bowl, pouring the mixture into a pan, and adding chocolate chips. The video also shows the person using a whisk and a spatula to mix the ingredients and pour the mixture into the pan. The person is wearing a blue shirt and is seen holding a glass bowl and a spoon. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"", + "bbox": [ + 127, + 212, + 480, + 297 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Human refinement: \"The video shows a person making a chocolate cake. The person mixes ingredients in a glass bowl and pours the mixture into a baking pan. Then, the person adds chocolate chips on top. The video also shows the person mixing the ingredients with an electric whisk and scraping the mixture into the pan with a spatula. The person is wearing a blue shirt and is seen holding a glass bowl. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"", + "bbox": [ + 504, + 212, + 874, + 299 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "These experiments highlight the quality of our video data engine and its ability to significantly improve encoder performance, even with only a relatively modest 17M videos compared to the billions of images seen during pretraining. 
Our video data engine is a vital component in building a strong, unified image-video encoder.",
These captions are ideal for fine-grained video understanding.", + "bbox": [ + 109, + 618, + 665, + 708 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0419cec4c68d3e21eb66b1cc748ebcb2ae14852eccf08775be3e31fa04f72c48.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Videos998,862
Human Captions118,862
Total Duration4625 hrs
Duration (s)16.7±9.8
Human Caption Length57.1±25.4
Model Caption Length111.7±43.2
", + "bbox": [ + 689, + 623, + 880, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3 PVD Statistics.", + "bbox": [ + 710, + 694, + 861, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Fig. 7, we visualize a video example together with their model and human captions from PE Video Dataset (See Fig. 18 for more). The dataset statistics are summarized in Tab. 3. Finally, We use $105\\mathrm{K}$ of these refined samples to improve the data engine ( $\\S 2.2$ phase 2) and $15\\mathrm{K}$ as a high-quality video retrieval benchmark.", + "bbox": [ + 109, + 715, + 885, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PVD Benchmark. We use 15K of the human-refined video-caption pairs as a held-out test set, which we introduce as a new video retrieval benchmark, PVD Benchmark, to evaluate finegrained video-caption alignment. We follow the format of MSR-VTT [153] to construct the benchmark. We select videos from 10 different categories, including hand actions, object interactions, food preparation, work activities, outdoor scenes, animals, water scenes, object handling, close-up shots, and nature scenes, with an overall average caption length of 51.7 words (see Appendix A.2.3 for statistics). We use PVD Benchmark to evaluate SigLIP [160], SigLIP2 [138], InternVL [19], and PE models, and the results can be found in Tab. 
7.", + "bbox": [ + 109, + 773, + 887, + 878 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "$^{2}$ PVD available at https://ai.meta.com/datasets/pe-video/", + "bbox": [ + 127, + 888, + 491, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2.4 A Unified Encoder for Image and Video", + "text_level": 1, + "bbox": [ + 109, + 79, + 473, + 95 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Using a robust, scalable image pretraining recipe and video-pretraining data recaptioned by the proposed video data engine, in this section we present $\\mathsf{PE}_{\\mathrm{core}}$ , a unified image-and-video encoder.", + "bbox": [ + 109, + 104, + 883, + 133 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Model Architecture. To capitalize on the promising scaling behavior observed in §2.1, we scale the largest $\\mathrm{PE}_{\\mathrm{core}}$ model to 2B parameters3 (G scale). Tab. 4 shows the detailed model configuration of the vision and text transformers and the dimension of the output clip embedding space.", + "bbox": [ + 109, + 146, + 547, + 222 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/84e3e8bf18b5ba8bbb0729dc244ad4d6daf947726233ee1ff38ac64927363783.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP Dim
BVision0.09B768123072121024
Text0.31B102424409616
LVision0.32B1024244096161024
Text0.31B102424409616
GVision1.88B1536508960161280
Text0.47B128024512020
", + "bbox": [ + 576, + 145, + 883, + 223 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4 PE Model Configurations.", + "bbox": [ + 620, + 226, + 834, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Smaller Model Distillation. To maximize the performance of", + "bbox": [ + 109, + 233, + 549, + 248 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "smaller models (B and L scales in Tab. 4), we employ a distillation finetuning approach [49] using $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ as the teacher. This process involves a short finetuning schedule where both the student and teacher models encode image and text inputs separately to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence, distilling multimodal relational knowledge from the teacher into the student.", + "bbox": [ + 107, + 250, + 885, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Notably, we find that using a smaller softmax temperature for the teacher's distributions, specifically $0.5 \\times$ the temperature used for the student's distribution, significantly enhances the effectiveness of knowledge distillation. By leveraging the strong embeddings provided by $\\mathrm{PE}_{\\mathrm{core}} \\mathrm{G}$ , our short distillation finetuning schedule significantly boosts the performance of both B and L scale models of $\\mathrm{PE}_{\\mathrm{core}}$ (see Appendix C.3).", + "bbox": [ + 109, + 330, + 883, + 393 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Model Training. The training process of $\\mathrm{PE}_{\\mathrm{core}}$ involves three stages:", + "bbox": [ + 109, + 402, + 599, + 419 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Image pretraining. 
We scale up image pretraining to 5.4B publicly available image alt-text pairs curated with MetaCLIP [152] and a total of 86B samples seen to ensure convergence (58B for B and L). We use a global batch size of 131K, with progressive resolution from 98 to up to 448 depending on the model.", + "2. Image and video finetuning. Following the initial pretraining, we subsequently finetune the model at max resolution with a short schedule for 50M samples on the image pretraining data (as cooldown) followed by 22M samples on the recaptioned videos with a smaller learning rate and batch size. The video captions are produced using the proposed video data engine (§2.2). For each video clip, we uniformly sample 8 frames, encode them, take their average to produce a single video embedding, and align them with the corresponding video captions using the same contrastive objective in image training.", + "3. Smaller model distillation. We distill the 2B model (G scale) into smaller contrastive pretrained models at B and L scales under their final resolutions, using a short schedule that covers approximately 4B samples seen ( $\\sim 8\\%$ of the pretraining schedule) with a lower learning rate and no weight decay." + ], + "bbox": [ + 130, + 426, + 883, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The detailed training configuration and setups are listed in Appendix B.1.1.", + "bbox": [ + 109, + 630, + 656, + 646 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2.5 Core Results", + "text_level": 1, + "bbox": [ + 109, + 664, + 264, + 679 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Zero-Shot Image Results. In Tab. 5, we present $\\mathrm{PE}_{\\mathrm{core}}$ 's performance on zero-shot image benchmarks for classification and retrieval vs. the strongest existing models, including SigLIP2 [138] and proprietary models using JFT-3B [29], which is likely tuned for ImageNet. 
$\\mathrm{PE}_{\\mathrm{core}}$ outperforms all other contrastive models across the board on all zero-shot tasks, including the highly competitive average of zero-shot ImageNet robustness metrics [4, 26, 46, 47, 112, 143]. This marks a significant achievement, as we are the first to accomplish this in over 3 years without access to Google's internal JFT-3B [29] or WebLI [17] datasets. And at the same time, $\\mathrm{PE}_{\\mathrm{core}}$ also exceeds the existing state-of-the-art on image-text retrieval and significantly improves on fine-grained classification—the first to simultaneously hold state-of-the-art on all common zero-shot categories.", + "bbox": [ + 109, + 693, + 885, + 814 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "By harnessing the power of our video data engine, training with a relatively small dataset of 22M videos and their corresponding synthetic captions leads to substantial gains in image benchmarks, with average general image classification improving by $+0.6\\%$ with emphasis on more difficult benchmarks (notably $+1.2\\%$", + "bbox": [ + 109, + 821, + 883, + 868 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "3We employ the setup described in §2.1 except for the additional class token (only used for L and B). Interestingly, we find using the same high learning rate $(2 \\times 10^{-3})$ to perform well for G. We also did not find scaling the text encoder to be beneficial.", + "bbox": [ + 109, + 876, + 883, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 936, + 503, + 948 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/18451512f5568fe8da5336fa21d737dcaf1979fc3235a99cb9187fc3c71d5477.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Fine-Grained Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet 113 Classes [4]ImageNet adversarial [47]ImageNet Adversarial [48]ImageNet Renditions [46]ImageNet Sketch [143]Avg Fine.Food 101 [9]Flowers Oxford [97]Pets Oxford [100]Cars Stanford [59]Aircraft FGVC [88]Country 211 [133]Scenes SUN397 [150]Satellite RESISC [20]Avg Retrieval1Zero-Shot Retrieval MS-COCO txt→img [76]
Proprietary0.24B2246.6B84.385.786.380.682.385.695.776.1-95.191.297.9--------------------------------------------------0.24B2246.6B84.385.786.380.695.776.1-95.191.297.9-----------------------------------
BASIC [102]1.0B5764.8B85.786.380.695.776.1-95.191.297.9----------------------------72.651.266.380.492.585.786.380.695.776.1-------------------------------------------------MS-COCO t+to ing [76]MS-COCO img→to ing [76]MS-COCO img→to ing [75]
CoCa [158]1.0B5764.8B85.786.380.695.776.1-95.191.297.9---------------------72.651.266.380.492.585.786.380.695.776.1---0.24B2246.6B85.786.380.695.776.1-------------------------------------
LiT-22B [24]
", + "bbox": [ + 106, + 78, + 895, + 353 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ObjectNet, $+1.4\\%$ ImageNet Adversarial) and fine-grained classification by $+1.0\\%$ on average. Furthermore, due to the high level of detail and alignment of our synthetic captions, zero-shot retrieval is significantly boosted by $+3.6\\%$ on average. These results emphasize that training with well-aligned video text data does not just improve video performance—it creates a strictly better model for both videos and images.", + "bbox": [ + 109, + 450, + 888, + 513 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Zero-Shot Video Results. We assess the performance of $\\mathrm{PE}_{\\mathrm{core}}$ on zero-shot video benchmarks by employing the same model as a frame-based video encoder, utilizing 8 uniformly sampled frames, as described in §2.2.", + "bbox": [ + 109, + 523, + 320, + 645 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We present the corresponding video results in Tab. 6. Our base image encoder already outperforms all other image-only encoders on both zero-shot classification and retrieval, including SigLIP2-g-opt. With video finetuning, $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ significantly outperforms even native video models that use full temporal attention on video classification, and nearly matches the", + "bbox": [ + 109, + 652, + 320, + 848 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/2e35a5b4cdd10463a1e04015eaa50ce24ce1e8fd08ad30843e4c022ac3a800c4.jpg", + "table_caption": [ + "Table 5 Zero-Shot Image Results. Image zero-shot performance of $\\mathrm{PE}_{\\mathrm{core}}$ compared to the state-of-the-art for both proprietary and open models. $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ is the first vision encoder to outperform the best models trained on the proprietary JFT-3B [29] and WebLI [17] on general classification. 
Moreover at all model sizes, $\\mathrm{PE}_{\\mathrm{core}}$ obtains state-of-the-art results across general classification, retrieval, and finegrained classification. $\\dagger$ Re-evaluated: DFN by [130]; SigLIP and SigLIP2 by us with the same benchmark settings if not reported in [138] (see Appendix B.1.2)." + ], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolution #FramesVideo DataZero-Shot ClassificationZero-Shot Retrieval
Avg Class.Kinetics 400 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 51 [62]Avg RetrievalMSR-VTT 304 [76]MSR-VTT 304 [76]MSVD 304 [76]MSVD 304 [76]MSVD 304 [76]ActivityNet 304 [76]ActivityNet 304 [76]
B Scale
CLIP [106]0.1B2248n/a54.358.455.146.168.943.229.230.424.240.557.29.113.2
CLIP4CLIP [84]0.1B22412n/a-------32.0-38.5---
SigLIP2-B/16†[138]0.1B2248n/a57.358.755.048.482.042.339.938.530.149.067.228.625.8
PEcoreB0.1B224822M63.965.665.155.884.648.249.947.647.350.476.739.038.4
L Scale
UMT-L [67]0.3B224825M------47.140.737.149.074.541.939.4
SigLIP2-L/16†[138]0.3B3848n/a64.165.362.556.886.749.344.741.531.453.774.235.931.5
PEcoreL0.3B336822M71.473.472.765.387.158.554.850.350.157.282.446.442.1
Unbounded Scale
InternVL [19]5.5B2248n/a-69.168.960.6---44.740.2----
InternVideo2 [146]1.0B2248102M70.773.172.864.988.853.959.951.950.958.183.360.454.8
VideoPrism-g* [164]1.1B28816619M-76.4-----39.771.0--52.750.3
SigLIP2-g-opt†[138]1.1B3848n/a68.269.867.061.890.751.846.643.134.255.874.638.333.4
PEcoreG (image only)1.9B4488n/a70.973.172.264.389.555.547.644.335.254.373.941.436.3
PEcoreG1.9B448822M74.876.976.169.190.761.158.751.249.959.785.454.751.2
", + "bbox": [ + 339, + 527, + 890, + 767 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 6 Zero-Shot Video Results. Video performance of $\\mathrm{PE}_{\\mathrm{core}}$ compared to recent video and image encoders. $\\mathrm{PE}_{\\mathrm{core}}$ obtains state-of-the-art in video classification and comparable performance on retrieval benchmarks while using only 22M videos. $^*$ Proprietary models. ${}^{+}\\mathrm{SigLIP2}$ are evaluated by us with the same zero-shot prompts frame embedding averaging strategy (as in [19, 84, 106]). See Appendix B.1.2.", + "bbox": [ + 339, + 768, + 887, + 840 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "state-of-the-art on video retrieval using a simple frame-level encoder. This result underscores the importance of our video data engine, resulting in $+3.9\\%$ on average zero-shot video classification, and a massive $+11.1\\%$ on retrieval. Moreover, $\\mathrm{PE}_{\\mathrm{core}}$ does this with much less video data compared to other video-based approaches like InternVideo2 [146] and VideoPrism [164], highlighting the benefits of a joint image-video encoder.", + "bbox": [ + 109, + 849, + 888, + 910 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/a153733586205ebf63f98fe5ca0ce22decc28af55d3f7589bc0d01e7ddca09b3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Retrieval
ObjectNet [4]ObjectNet [4]Inaturalist 2017 [140]Dollar St 58 [39, 113]TextCaps img→cat [122]TextCaps Flip img→cat [122]PVD Bench img→vidPVD Bench vid→cat
SigLIP2-B/16 [138]0.1B22410B73.659.116.955.972.069.853.960.1
PEcore B0.1B2245.4B71.958.325.952.172.371.959.861.1
SigLIP2-L/16 [138]0.3B38410B84.473.226.757.678.076.261.967.1
PEcore L0.3B3365.4B84.774.335.359.678.578.364.765.2
InternVL-C [19]5.5B2245B80.667.219.458.272.367.863.465.1
SigLIP2-g-opt [138]1.1B38410B88.078.131.559.378.876.962.567.1
PEcore G1.9B4485.4B88.279.041.162.378.878.777.076.6
", + "bbox": [ + 122, + 77, + 550, + 218 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/b442b6245b11605dc45b684c129fb444053fa41af40d35c15539a8fc5181254b.jpg", + "table_caption": [ + "Table 7 Additional Zero-Shot Results. We present several additional zero-shot benchmarks from existing datasets and our own PVD (§2.3) to address evaluation gaps left by standard benchmarks." + ], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolutionDataEncoder Probing
ImageNet [26]ImageNet [26]ImageNet [26] Attention
DINOv2-g [98]1.1B224145M83.586.5\\( 87.2^{\\dagger} \\)
RADIOv2.5-g [45]1.1B518-85.3--
AIMv2 3B [37]2.7B4487.2B--89.5
InternVL-C [19]5.5B2245B-88.2-
EVA 18B [130]17.5B2242B-88.9-
\\( PE_{core}G \\)1.9B4485.4B86.889.589.8
", + "bbox": [ + 602, + 78, + 883, + 205 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 8 Encoder Probing Results. We evaluate $\\mathrm{PE}_{\\mathrm{core}}$ G's frozen features using the typical probing methods to compare to models without zero-shot support. from [37].", + "bbox": [ + 601, + 208, + 887, + 265 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Additional Zero-Shot Benchmarks. We further evaluate $\\mathrm{PE}_{\\mathrm{core}}$ on an additional set of zero-shot classification and retrieval benchmarks we construct in Tab. 7 to address key gaps in common benchmarks. For comparison, we also evaluate SigLIP2 [138] and InternVL-C [19] on these benchmarks.", + "bbox": [ + 107, + 290, + 888, + 338 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "First, we note that the version of ObjectNet [4] that is standard to benchmark robustness (e.g., in Tab. 5) is not the full set. ObjectNet consists of 313 classes of objects in challenging and uncommon orientations, locations, and viewpoints. However, the standard version used for benchmarking is a 113 class subset of classes that overlap with ImageNet-1k [26]. Naturally, benchmarking in this way rewards performing well on ImageNet classes over generality. To remove this bias, we construct the full ObjectNet set with all classes and compare to the reduced ObjectNet set in Tab. 7. Surprisingly, we find that while $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ performs $+7.6\\%$ over InternVL-C and only $+0.2\\%$ over SigLIP2-g-opt on the reduced ObjectNet set, it performs $+11.8\\%$ over InternVL-C and $+0.9\\%$ over SigLIP2-g-opt on the full set of classes, highlighting PE's generality.", + "bbox": [ + 107, + 343, + 888, + 465 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Next, we include iNaturalist [140] as a zero-shot benchmark because of its level of specificity with 2,101 fine-grained long-tail classes. 
$\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ outperforms the next best SigLIP2-g-opt model by $+9.6\\%$ , emphasizing PE's long tail knowledge. We then evaluate PE's cultural diversity on Dollar Street $[113]^4$ , which consists of images of under-represented populations. Here too we find $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ to outperform existing methods, with $+3.0\\%$ over SigLIP2-g-opt. Further, we test OCR performance by setting up TextCaps [122] as a retrieval dataset. Notably, $\\mathrm{PE}_{\\mathrm{core}}$ performs on par or better than SigLIP, which is known for good OCR performance. This is potentially surprising, as the horizontal flip augmentation we used during robust pretraining (S2.1) is typically thought to hurt OCR performance. However, instead it seems to have given $\\mathrm{PE}_{\\mathrm{core}}$ the ability to read backwards: we test the same TextCaps retrieval but with all images horizontally flipped. Other models suffer from this, but $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ 's performance only drops by $0.1\\%$ . Finally, we evaluate $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ on the PVD benchmark (S2.3), a challenging video retrieval task on 15K diverse and human-refined videos. Here, $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ significantly outperforms InternVL [19] by $+13.6\\%$ on text $\\rightarrow$ video and $+9.5\\%$ to SigLIP2 [138] on video $\\rightarrow$ text.", + "bbox": [ + 107, + 470, + 888, + 654 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Frozen Encoder Probing Results. To compare against models that are not capable of zero-shot classification, we additionally evaluate $\\mathrm{PE}_{\\mathrm{core}}$ using k nearest neighbors (following [98]), linear probing (following [19]), and attention probing (following [37]) on top of the ImageNet-1k [26] train set. We present these results in Tab. 8 and compare to other encoders using their reported numbers. 
In every case, $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ outperforms all existing open encoders, including those with significantly more parameters.", + "bbox": [ + 107, + 665, + 888, + 742 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Summary. $\\mathrm{PE}_{\\mathrm{core}}$ , a unified image-video encoder, achieves state-of-the-art performance across zero-shot classification and retrieval on both images and videos on a wide variety of benchmarks. This synergy is made possible by our robust image pretraining recipe (§2.1) and powerful video data engine (§2.2), which together enable the model to effectively leverage the strengths of both image and video data at scale.", + "bbox": [ + 107, + 752, + 888, + 814 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "4We use the version provided by [39] and re-evaluate all models to ensure a fair comparison.", + "bbox": [ + 127, + 898, + 692, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 936, + 509, + 949 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3 General Features in a Contrastive Disguise", + "text_level": 1, + "bbox": [ + 109, + 80, + 565, + 99 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "$\\mathrm{PE}_{\\mathrm{core}}$ puts up strong results on the tasks contrastive encoders are known for, like zero-shot classification and retrieval. But while those tasks are useful, they are only a small part of the vision ecosystem. What really matters is whether or not the features learned with our pretraining recipe are useful to downstream tasks.", + "bbox": [ + 109, + 113, + 887, + 159 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Today's common wisdom in the vision community cites that different pretraining methods result in features useful for different tasks: e.g., contrastive for classification, captioning for language modeling, and self-supervised learning for spatial tasks. 
To see how $\\mathrm{PE}_{\\mathrm{core}}$ stacks up against against models with different pretraining techniques, we compare its frozen features to the state-of-the-art large-scale models for captioning (AIMv2-3B [37]) and self-supervised learning (DINOv2-g [98]) on a variety of downstream tasks.", + "bbox": [ + 107, + 166, + 887, + 243 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Layerwise Feature Analysis. We summarize the results of our frozen feature analysis in Fig. 8 for several downstream benchmarks in 3 categories: classification, language modeling, and spatial tasks. For classification, we probe each model using a randomly initialized cross attention transformer block. For language alignment, we use the Perception Language Model (PLM) [21] frozen encoder evaluation setup, learning a projector and finetuning a decoder-only LLM (see §4), and for spatial tasks we train with several different decoders (ViTDet [72] Mask-RCNN [43] with Absolute Win [7] for detection, DPT [109] for depth, and zero-shot feature correspondence for tracking [52]). For each experiment, we sweep over the layers of the model as the optimal features are not necessarily the last [18]. In each case, we use an equivalent image size (window size for detection) of $32 \\times 32$ tokens. In each plot, we normalize performance by the maximum and minimum performance across models on that task.", + "bbox": [ + 109, + 253, + 398, + 645 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "An Alignment Problem. This analysis reveals several insights. First, as expected, AIMv2 performs well at classification and the best at visual Q&A language tasks. Similarly, DINOv2 performs the well on spatial tasks like detection, depth, and even performs the best at grounding through an LLM. Then as already established by other works: DINOv2 lacks performance on OCR tasks [134]. 
This is no secret, but what is interesting is that its performance peaks in the middle of the network and then drops significantly by the end. And so does the performance of other models", + "bbox": [ + 109, + 657, + 397, + 883 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg", + "image_caption": [ + "Figure 8 Layer Analysis. Evaluating intermediate layers as frozen features across tasks for different pretraining methods: captioning (AIMv2-3B [37], left), spatially self-supervised (DINOv2-g [98], middle), and our contrastive recipe $\\mathrm{(PE_{core}G}$ , right). Vertical lines denote the best layer and horizontal lines the best performance across models. As expected, AIMv2 performs well on language but not spatial, and DINOv2 performs well on spatial but not language. But surprisingly, intermediate layers of $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ perform well on both language modeling and spatial tasks." + ], + "image_footnote": [], + "bbox": [ + 421, + 258, + 883, + 758 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "for other downstream tasks (AIMv2: tracking, grounding, detection; DINOv2: VQ&A, grounding).", + "bbox": [ + 109, + 883, + 821, + 901 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 936, + 506, + 949 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "$\\mathrm{PE}_{\\mathrm{core}}$ exhibits similar behavior, but with unexpected results. Unlike the others, in earlier layers of the network $\\mathrm{PE}_{\\mathrm{core}}$ performs well on all tasks, often matching or exceeding the leading models. Remarkably, PE has intermediate layers that perform near to or on par with AIMv2 for language tasks and DINOv2 for spatial tasks, despite being trained with contrastive loss. 
Depth estimation is particularly noteworthy, as contrastive encoders are not typically considered state-of-the-art in that area.", + "bbox": [ + 109, + 80, + 887, + 157 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "However, in almost all cases this strong performance diminishes rapidly towards the end of the network. In fact, the performance of $\\mathrm{PE}_{\\mathrm{core}}$ in the final layer is abysmal for certain tasks, such as LLM-based grounding (the reason for which will become apparent in §5). This behavior is less pronounced the closer the downstream task is to the pretraining method, suggesting an alignment problem. Specifically, a well-tuned large-scale contrastive model can learn general embeddings in the process of fitting its objective, but it fails to output them. Therefore, to reveal these embeddings, the model must be subsequently aligned to downstream tasks.", + "bbox": [ + 107, + 162, + 887, + 255 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Analysis. The finding that pure CLIP models possess features which match the performance of state-of-the-art pretraining methods in their specialized domains is new. In fact, recent work [31] has shown the opposite—that CLIP models fail to scale on downstream tasks. We next investigate how our approach yields these results.", + "bbox": [ + 107, + 266, + 887, + 313 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To start, we perform layerwise frozen feature analysis on COCO detection. $\\mathrm{PE}_{\\mathrm{core}}$ was particularly \"peaky\" on this task in Fig. 8, with its best layer on par with DINOv2, but last layer significantly worse. We already ablated each change we made from vanilla CLIP in Fig. 2 using a ViT-L/14 model. So to retrace our steps, we run frozen feature analysis on those checkpoints. For efficiency, we perform this experiment at a lower resolution and only sample even layers. In Fig. 
9, we report COCO box mAP for the last and best layers for each cumulative ablation, along with the index of the best layer. Further, we plot the layerwise performance for each change in Fig. 10.", + "bbox": [ + 109, + 319, + 437, + 546 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Surprisingly, the simple changes we made in §2.1 to construct our pretraining recipe overall improved the best layer's performance by", + "bbox": [ + 109, + 551, + 437, + 598 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "almost $10\\,mAP$ over vanilla CLIP! Some changes like high resolution (5) and RoPE (6) improving spatial features is to be expected, but unexpectedly data augmentation (8) and especially progressive resolution (2) help considerably. It is possible that contrastive pretraining is prone to overfit to the \"global\" nature of the task through \"global tokens\" [23]. However, as the model cannot maintain global tokens in the same place due to the resolution progressively changing, it is forced to be more robust. Also of note is that both progressive resolution (2) and attention pooling (7) move the argmax layer deeper into the network (rightmost column of Fig. 9). Attention pooling in particular alters the whole shape of the layerwise performance curve (Fig. 10), while the other changes typically only raise or lower it.", + "bbox": [ + 107, + 598, + 887, + 719 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg", + "image_caption": [ + "Figure 10 Layer Analysis corresponding to the results presented in Fig. 9." + ], + "image_footnote": [], + "bbox": [ + 127, + 733, + 352, + 847 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Potentially more interesting is what did not improve performance: specifically, increasing the batch size (3) and using LAMB with a high learning rate (4). 
Both of these changes explicitly help the model fit the CLIP loss better, which after a certain point may not improve the general features. Moreover, while the best layer overall improved significantly, the last layer performance stagnated after (2). This suggests that constructing the global CLIP token requires a substantial \"decoder\" (in this case, 6 layers for the final L/14 model). Although the features of this decoder are beneficial for some tasks (e.g., Visual Q&A as shown in Fig. 8), they are not general. Nevertheless, this does not prevent the model from learning general features; it merely limits their expression in the output.", + "bbox": [ + 369, + 726, + 887, + 893 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg", + "image_caption": [ + "Figure 9 The Downstream Effects of Robust Pretraining. The ViT-L/14 checkpoints from Fig. 2 evaluated as frozen features on COCO [76] using Mask R-CNN [43]. We report the last layer performance, best layer performance, and the best layer's index." + ], + "image_footnote": [], + "bbox": [ + 470, + 328, + 893, + 520 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 936, + 508, + 948 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Scaling Behavior. Finding a simple, easily scalable vision pretraining method that produces generally useful features has been the white whale of the vision community for a while. Evidently, our robust recipe can enable contrastive pretraining to produce general features. So that begs the question, \"does it scale?\"", + "bbox": [ + 109, + 80, + 395, + 202 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We can answer this question in the same way: by performing frozen feature layer analysis of our S/14, B/14, and L/14 scaling ablation checkpoints from Fig. 3. We report the result of that analysis in Fig. 11. 
We also include our final $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ model using the same setup, but note this is an estimate as our ablation and final schedules are different.", + "bbox": [ + 109, + 208, + 395, + 345 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Immediately, we see a stark contrast between the scaling behavior of the vanilla CLIP recipe and ours. While the vanilla recipe quickly plateaus at L scale (300M), the best layer of our robust pretraining recipe demonstrates scaling to G scale (2B) and potentially beyond—despite being trained with a decidedly non-spatially aligned global contrastive loss. However, this is the best layer. The last layer performance still stagnates for both the vanilla recipe and ours. This may be why prior work [31] finds contrastive pretraining to not scale for downstream tasks—CLIP loss obfuscates its general features even with our recipe, placing them several layers deep.", + "bbox": [ + 109, + 352, + 398, + 609 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "However, this is just for a single spatial task. To see whether the trend is consistent, we repeat this scaling analysis on a wide variety of downstream language modeling tasks using the same frozen evaluation setup as Fig. 8 and report the results in Fig. 12. Surprisingly, the simple change in pretraining recipe improves scaling for most language tasks as well—including output-side grounding (RefCOCO). Note that in this benchmarking setup, the LLM never sees videos during training so the Video Q&A per-layer results are noisy. 
Yet, the best layer trend is still the same.", + "bbox": [ + 109, + 616, + 395, + 843 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Clearly, contrastive pretraining with our", + "bbox": [ + 109, + 849, + 393, + 864 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg", + "image_caption": [ + "Object Detection" + ], + "image_footnote": [], + "bbox": [ + 433, + 84, + 581, + 175 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg", + "image_caption": [ + "Figure 11 The Downstream Scalability of Robust Pretraining. Left: frozen feature layer analysis of the S/14, B/14, and L/14 models from Fig. 3 using the same setup as Fig. 9. Right: scaling behavior of the best layer for each model. Note: G is our final model and has a different schedule." + ], + "image_footnote": [], + "bbox": [ + 586, + 84, + 722, + 175 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 730, + 75, + 880, + 174 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg", + "image_caption": [ + "OCR Q&A" + ], + "image_footnote": [], + "bbox": [ + 433, + 267, + 581, + 359 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg", + "image_caption": [ + "Visual Q&A" + ], + "image_footnote": [], + "bbox": [ + 584, + 267, + 720, + 359 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 260, + 887, + 359 + ], + "page_idx": 12 + }, + { + "type": 
"image", + "img_path": "images/8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg", + "image_caption": [ + "Captioning" + ], + "image_footnote": [], + "bbox": [ + 433, + 378, + 581, + 470 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 378, + 720, + 470 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 371, + 887, + 470 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 433, + 489, + 581, + 580 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 489, + 720, + 580 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 482, + 885, + 580 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg", + "image_caption": [ + "Grounding" + ], + "image_footnote": [], + "bbox": [ + 433, + 601, + 581, + 691 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 601, + 720, + 691 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 728, + 593, + 885, + 691 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg", + "image_caption": [ + "Video Q&A" + ], + "image_footnote": [], + "bbox": [ + 433, + 712, + 581, + 803 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg", + "image_caption": [ + "Figure 12 Further Scalability Analysis. We repeat the analysis from Fig. 11 on a wide range of downstream tasks by adapting to a language model. Each category is an average of several downstream tasks (see §4)." + ], + "image_footnote": [], + "bbox": [ + 584, + 710, + 720, + 803 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 700, + 885, + 803 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "robust recipe produces strong general features that scale. However, these features are not going to be much use stuck in the middle of the network. To remedy this, in the remaining sections we will discuss methods for aligning these general features to the output of the network for both language modeling and spatial tasks.", + "bbox": [ + 109, + 864, + 885, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 936, + 508, + 948 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4 Perception Encoder: Language Alignment", + "text_level": 1, + "bbox": [ + 109, + 80, + 558, + 99 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In §3 we have seen that $\\mathrm{PE}_{\\mathrm{core}}$ already possesses useful features for vision-language modeling. 
In this section, we lift these features through alignment tuning to construct a new encoder, $\\mathrm{PE}_{\\mathrm{lang}}$ , specialized for multimodal large language models (MLLMs). Our principle is to design not only the most performant, but also the most general vision encoder for use in MLLM development. To this end, we want a single language-aligned encoder that performs well across language models, across input resolutions, and for a wide variety of MLLM tasks.", + "bbox": [ + 107, + 106, + 883, + 183 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "MLLM Evaluation Tasks. In this section, our main testbed is to adapt vision encoders to MLLMs and test on various MLLM tasks. We evaluate the downstream performance of each MLLM across five task categories: (1) OCR, Chart, Document Q&A on ChartQA [165], DocVQA [91], InfoVQA [92] and AI2D [57]; (2) Visual Q&A on TextVQA [125], OK-VQA [118], POPE [73], and VQAv2 [40]; (3) Captioning on Flicker [157], COCO [76], and No Cap [1]; (4) Video Understanding on VideoMME [38], STAR [148], TGIF-QA [53], EgoSchema [89], MVBenchmark [68], and PerceptionTest [105]; and finally (5) Grounding on RefCOCO [56].", + "bbox": [ + 107, + 194, + 887, + 287 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.1 Language Alignment Method", + "text_level": 1, + "bbox": [ + 109, + 297, + 393, + 314 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We begin by searching for the optimal language alignment method. We design our alignment tuning based on the midtraining stage of Perception Language Model (PLM) [21], which is to adapt $\\mathrm{PE}_{\\mathrm{core}}$ to a pretrained decoder-only LLM (Llama 3 [82]) connected by a vision projector. We start with \"warmup\" training stage with autoregressive next-token prediction loss on 1M image-text samples from pretraining, where everything but the projector is frozen. 
Then, we proceed to finetune all parameters on 70M data samples [21] covering natural images, documents/charts/diagrams, and videos, using the same next-token prediction loss. After completing this language alignment, we extract the vision encoder from the model and refer to it as $\\mathrm{PE}_{\\mathrm{lang}}$ .", + "bbox": [ + 107, + 321, + 883, + 429 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To arrive at the optimal training configuration presented in PLM [21], we first conduct ablation studies using a 20M subset of the data. In Tab. 9, we ablate the LLM sizes, training parameters, vision projector types, output layers to project, and encoder regularization. We evaluate across OCR Q&A, Captioning, Visual Q&A, and Video Q&A and find the best configuration.", + "bbox": [ + 109, + 435, + 568, + 526 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "LLM Setup. We explore different scales (1B or 3B parameters) and freezing weights of the LLM. We observe that going from 1B to 3B parameters increases average score by 1.6 points $(76.5\\rightarrow 78.1)$ . Unfreezing the LLM boosts this number to 78.4.", + "bbox": [ + 109, + 532, + 566, + 594 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Vision Projector. Using a 2-layer MLP vision projector instead of a linear layer improves the average score from 77.2 to 78.1, while only adding few parameters (13.5M → 27M).", + "bbox": [ + 109, + 601, + 566, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PE Output Layer. As shown in §3, $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ has intermediate layers that perform significantly better than the last layer when used as features for certain tasks. However, it is not clear if that", + "bbox": [ + 109, + 654, + 566, + 698 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/6aaf571ddc34b68dd60e42fa52c459e5fa0be4d384dfe35f17bc16668a48d9aa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LLM scaleLLM unfrozen Regularization?ProjectorLayerAvg.OCR Q&A Average of 4
Average of 3Captioning Average of 3
LLM Setup
1BMLP4776.560.7115.176.054.0
3BMLP4778.165.9115.776.654.1
3BMLP4778.465.8117.676.353.7
Vision Projector
3BLinear4777.264.5114.176.553.7
3BMLP4778.165.9115.776.654.1
PE Output Layer
3BMLP5075.956.6116.776.553.7
3BMLP4778.165.9115.776.654.1
3BMLP4176.965.5112.875.453.9
PE Regularization
3BMLP4779.969.0117.577.455.6
3BMLP4780.168.7118.377.056.3
", + "bbox": [ + 593, + 441, + 890, + 643 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 9 Language Alignment. We find the best configuration to language align $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ using autoregressive language training.", + "bbox": [ + 589, + 646, + 885, + 688 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "same behavior applies when finetuning. We test applying the projector to layers 41, 47, and 50 (the last layer), and find that layer 47 works best. Incidentally, this is also the optimal layer for frozen VQ&A in Fig. 8.", + "bbox": [ + 109, + 699, + 885, + 729 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PE Regularization. We apply LayerScale [135] and DropPath [50] to the vision encoder during the alignment, for stabilizing training. This improves the 78.1 average score to 79.9 (+1.8 points). Unfreezing the LLM boosts this number further to 80.1. We choose this configuration (last row) as our final alignment setup.", + "bbox": [ + 107, + 737, + 887, + 782 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To construct $\\mathrm{PE}_{\\mathrm{lang}}$ , we scale this recipe up the 70M samples mentioned above (more details in [21]). In summary, we use a pretrained Llama3.2 3B, unfrozen, with a 2-layer MLP as a vision projector on top of layer $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ layer 47 (with the last 3 discarded) and regularize the encoder with LayerScale and DropPath. Compared to the 20M sample ablation setting in Tab. 9, the final $\\mathrm{PE}_{\\mathrm{lang}}$ trained on 70M total samples gives another +2.1 points to 82.2 on the average across OCR Q&A, Captioning, Visual Q&A, and Video Q&A.", + "bbox": [ + 107, + 789, + 887, + 867 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Effects. The goal of alignment tuning is to lift the strong features found in intermediate layers of $\\mathrm{PE}_{\\mathrm{core}}$ described in §3 to the end of the network. 
To see if we actually accomplished that, we perform the same layerwise", + "bbox": [ + 107, + 877, + 887, + 909 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "analysis as in Fig. 8 on our final $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ model and compare it to the original $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ checkpoint it was initialized from. We present the results of this analysis in Fig. 13, and immediately we see that language alignment was a success: across all categories, the performing layer for the aligned model was the last, no matter the performance of the original checkpoint. Notably, our $\\mathrm{PE}_{\\mathrm{lang}}$ training mix did not contain grounding data, which means that this significantly lifted grounding performance is entirely due to the strong intermediate grounding features in $\\mathrm{PE}_{\\mathrm{core}}$ now being aligned to the end of the network. Moreover, specific domains such as OCR Q&A that were represented in the training mix see a significant boost to performance compared to even the best layer of $\\mathrm{PE}_{\\mathrm{core}}$ , which was already strong. Thus, with an order of magnitude fewer samples compared to pretraining, we were able to language align $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ to create a single, strong encoder for all visual language modeling tasks. 
Following this success, we align $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}$ in a similar manner to construct $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}$ (see [21]).", + "bbox": [ + 109, + 80, + 509, + 383 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 85, + 715, + 198 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 718, + 85, + 883, + 196 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg", + "image_caption": [ + "Figure 13 Language Alignment. We analyze how language alignment changes the internal features of PE. Similar to our $\\mathrm{PE}_{\\mathrm{core}}$ analysis in Fig. 12, we extract $\\mathrm{PE}_{\\mathrm{lang}}$ and adapt each layer to a new LLM." + ], + "image_footnote": [], + "bbox": [ + 547, + 202, + 712, + 314 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 200, + 885, + 314 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.2 Comparisons with Existing Vision Encoders", + "text_level": 1, + "bbox": [ + 109, + 401, + 509, + 417 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We compare $\\mathrm{PE}_{\\mathrm{core}}$ and $\\mathrm{PE}_{\\mathrm{lang}}$ with other vision encoders that are popular choices in MLLM literature: MetaCLIP [152], SigLIP2 [138], CLIP [106], AIMv2 [37], DINOv2 [98], and InternViT2.5 [18]. 
Overall, these encoders span several different pretraining losses (e.g., contrastive, captioning, self-supervised, and mixed supervision), encoder sizes (from 300M to 6B parameters), and resolutions (from 224 to 512). For all vision encoders, we find the best intermediate layers to train MLLM for fair comparison (more in Appendix B.2).", + "bbox": [ + 109, + 425, + 887, + 503 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "MLLM Benchmarking Setup. We connect each vision encoder, including $\\mathrm{PE}_{\\mathrm{lang}}$ , to a language decoder with a fresh 2-layer MLP projector. Similar to the alignment stage, we first train only the projector on a subset of 1M image-text pairs from pretraining. Then, we train both the projector and LLM on 2.6M visual Q&A pairs,", + "bbox": [ + 109, + 513, + 887, + 560 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/f7ec4c21f4db6833171c5782c1dfa0e8273b68efcd9efc14890eb5c453a133c9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [11]Avg. Ground RefLOCOg+ [56]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGF-IQA Acc. [53]EgoScheme Acc. [89]MV-Bench Acc. [68]PerceptionTest Acc. [105]
CharQA Acc. [165]DocVQA Acc. [91]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1444.947.933.028.770.268.447.662.586.976.5110.587.5130.0114.160.653.946.151.066.458.649.451.9
MetaCLIP-G [152]1.8B224/1444.847.633.127.970.668.848.263.586.576.9111.186.5132.1114.860.553.145.050.766.456.048.751.9
PElang G†1.7B*224/1453.761.347.132.274.171.855.165.386.879.8116.491.0136.9121.265.755.547.355.768.959.648.652.9
576 Tokens per Image
CLIP [106]0.3B336/1453.561.749.532.870.172.760.763.987.378.9113.392.0132.9115.065.054.246.352.168.657.448.552.3
AIMv2-L [37]0.3B336/1453.361.648.032.171.473.762.764.387.780.1115.290.9135.6119.263.352.544.350.967.554.444.953.2
AIMv2 L Dist. [37]0.3B336/1453.761.149.431.572.774.162.864.888.380.3117.894.7137.5121.262.653.844.352.465.057.450.053.6
SigLIP2-so [138]0.4B384/1658.969.058.335.273.176.869.867.288.781.6116.592.1137.7119.867.454.545.553.167.257.649.354.5
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1466.976.873.641.176.176.268.566.089.181.3119.796.1139.6123.468.958.148.758.970.561.852.755.9
1024 Tokens per Image
InternViT 2.5 L [18]0.3B448/1460.674.159.235.973.174.265.464.487.679.6112.388.4133.7114.966.950.645.244.862.754.246.050.5
SigLIP2-so [138]0.4B512/1663.372.169.339.072.777.974.866.089.081.8117.493.5138.3120.269.655.846.255.467.062.050.054.5
PEcore L0.3B448/1459.468.762.536.669.774.767.764.388.378.7112.789.6133.4114.959.750.941.751.261.652.647.450.6
PElang L0.3B448/1471.181.081.946.475.077.173.065.589.380.8117.394.3137.3120.170.556.547.057.268.059.852.354.7
DINOv2-g [98]1.1B448/1430.019.614.724.261.561.019.360.488.675.8109.486.5131.6110.164.949.539.752.160.146.847.450.8
AIMv2 3B [37]2.7B448/1448.940.553.933.967.273.064.164.085.278.9115.793.8135.2118.136.154.645.154.566.755.451.754.3
InternViT2.5-6B [18]5.5B448/1459.972.359.435.272.575.568.964.988.280.2115.092.2136.3116.368.049.644.547.062.645.848.948.5
PEcore G1.9B448/1460.869.965.436.771.173.365.960.788.478.0112.591.6133.6112.466.652.042.353.162.951.448.853.6
PElang G†1.7B*448/1472.480.584.448.376.478.175.265.490.181.8120.196.6140.0123.671.358.048.060.169.462.052.456.0
", + "bbox": [ + 107, + 568, + 893, + 837 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 10 MLLM Results with Llama 3.18B. We compare various vision encoders at their native resolution using Llama 3.1-instruct 8B [82] as the language model. The table compares models of similar class in number of vision tokens and parameters. $\\mathrm{PE}_{\\mathrm{lang}}$ shows strong performance across all benchmarks, including against models $3\\times$ its size. ${}^{*}\\mathrm{PE}_{\\mathrm{lang}}$ has 1.7B parameters since we discard the last 3 layers during language alignment. $\\dagger$ Interpolated without extra training.", + "bbox": [ + 109, + 840, + 885, + 897 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "image captions, and image grounding samples (see Appendix B.2 for details). We benchmark at the native resolution of each encoder (with higher resolution tiling results in Appendix C.4). Finally, we ablate over two language decoders, Llama 3.1 8B [82] and QwenLM 2.5 7B [155], to measure generalization across LLMs.", + "bbox": [ + 109, + 80, + 887, + 127 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Results. Tab. 10 shows benchmarks results for native resolution input across existing encoders, $\\mathrm{PE}_{\\mathrm{core}}$ and $\\mathrm{PE}_{\\mathrm{lang}}$ . Notably, AIMv2 [37], InternViT2.5 [18], SigLIP2 [138] and $\\mathrm{PE}_{\\mathrm{lang}}$ are trained jointly with a language decoder using next token prediction objective, and thus they perform better overall compared to the base contrastive and self-supervised models across all the metrics. However, $\\mathrm{PE}_{\\mathrm{lang}}$ uses a fraction of the training FLOPs for language alignment tuning, while significantly outperforming all vision encoders by large margin (an average of $+3.5$ points for G and $+2.0$ points for L). 
Similarly, when tiling with 4 tiles and 1 thumbnail (see Appendix Tab. 30), both $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}$ and $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ outperform all existing vision encoders, including InternViT2.5 [18], which was specifically pretrained in a tiling setting and with grounding data. Appendix C.4, shows a breakdown of the RefCOCO results, as well as results for tiling with higher resolution.", + "bbox": [ + 109, + 138, + 887, + 277 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Transferability. As $\\mathrm{PE}_{\\mathrm{lang}}$ is aligned with Llama 3.2-instruct 3B, we conduct a separate set of experiments to check if our model performs well with a different base LLM. In Tab. 11 we repeat the native resolution comparison with QwenLM 2.5 7B [155]. Interestingly, $\\mathrm{PE}_{\\mathrm{lang}}$ not only outperforms all vision encoders in this setting, but it also outperforms InternViT2.5 [18], which is specifically aligned to QwenLM 2 [154] throughout midtraining. In fact, $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ with QwenLM even improves its performance with Llama in some cases like with OCR Q&A and video benchmarks, emphasizing the generality of our language alignment.", + "bbox": [ + 109, + 285, + 888, + 378 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/d4c9c0207cfe48c928432e95dc406954c073a838ea4989599c7687b773660fe9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Avg. Ground, ReCOC%+ [56]
CharQATextVQAFlicker CIDEr [157]Avg. Ground, ReCOC%+ [56]
Acc. [165]Acc. [125]COCO CIDEr [76]STAR Acc. [148]
DocVQADocVQANo Cap CIDEr [1]EGoSema Acc. [89]
Acc. [91]Acc. [92]Avg. Ground, ReCOC%+ [56]VideoOME Mec Aoc. [38]
Aoc. [57]Aoc. [73]Avg. VideoStAR Acc. [68]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1660.572.059.136.774.366.269.065.489.281.1116.391.6137.3120.070.057.051.355.866.061.051.955.7
SigLIP2-g-opt [138]1.1B384/1660.871.060.436.775.276.870.365.689.581.8118.896.4139.0121.169.958.352.057.668.162.052.857.4
PElang G†1.7B*336/1466.877.572.441.176.476.067.965.489.181.5118.894.6139.5122.370.160.254.661.769.863.654.357.2
1024 Tokens per Image
InternViT2.5 [18]0.3B448/1460.375.461.136.268.474.265.663.787.879.5112.188.5133.5114.168.155.850.354.766.659.050.653.8
SigLIP2-so [138]0.4B512/1666.377.271.942.473.977.974.265.689.981.8117.193.0138.0120.370.555.950.357.367.262.650.347.4
PEcore L0.3B448/1463.573.967.440.572.275.769.264.089.480.2113.388.7135.2115.966.557.349.657.867.760.852.355.5
PElang L0.3B448/1470.280.680.746.073.576.872.864.189.481.0116.493.4137.6118.170.458.351.659.867.462.253.455.4
DINOv2 [98]1.1B448/1431.321.714.724.664.361.018.959.588.976.9110.187.3132.1110.869.354.346.956.563.456.849.752.2
AIMv2 3B [37]2.7B448/1466.076.770.541.475.277.974.266.289.481.9119.296.4139.2122.067.656.345.958.067.860.851.453.9
InternViT2.5 [18]5.5B448/1464.278.265.339.673.676.470.164.589.381.7117.695.9138.4118.672.856.150.359.167.356.651.152.2
PEcore G1.9B448/1464.875.968.841.672.975.267.962.489.780.7113.191.7135.2112.370.557.048.758.366.960.852.954.5
PElang G1.7B*448/1472.981.683.749.576.777.974.964.590.381.9118.994.6139.8122.372.160.454.162.568.366.654.256.8
", + "bbox": [ + 107, + 385, + 895, + 584 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "System-Level MLLM Comparison. In Tab. 12, we conduct a system-level comparison to the state-of-the-art open-access MLLMs: LLaVA-OneVision 7B [66], Gemma3 12B [132], Molmo-D 7B [25], Qwen2 VL 7B [144], InternVL 2.5 8B [18] and the very recent InternVL 3 8B [168]. Each baseline uses a contrastively pretrained ViT (SigLIP-so400M [160], CLIP-L [106], DFN-H [33], and InternViT 2.5 300M [18]). For our PLM-8B we use $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ as the vision encoder with 36 tiles for images and 32 frames for video and Llama 3.1-instruct 8B as the language decoder (more details in [21]). We show numbers from their respective works or evaluate them ourselves if they are not reported (except for Gemma and InternVL 3). PLM-8B outperforms all other models tested, emphasizing that $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ can be used to drive strong results across a wide range of tasks.", + "bbox": [ + 109, + 631, + 887, + 755 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/cfe4b15691ebaac0d28f672e8a216e8ab712efe6f793dc7edb7c6cbec161fe75.jpg", + "table_caption": [ + "Table 11 MLLM Results with QwenLM 2.5 7B. Same setting as Tab. 10, but with QwenLM2.5 7B [155] as the language model. Although $\\mathrm{PE}_{\\mathrm{lang}}$ is aligned to Llama3.2 3B, the language alignment transfers well to a different language model." + ], + "table_footnote": [], + "table_body": "
ModelEncoderOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QA Acc. [165]Doc.VQA Acc. (test) [91]Info. QA Acc. (test) [92]Avg. VQA Text.VQA Acc. [125]OK-VQA Acc. [118]POPE Acc. [73]VQAV2 Acc. (val) [40]Avg. Cap. Flicker CIDEr [157]COCO CIDEr [76] No Cap CIDEr [1]Avg. Video Video.MME Acc. [38]STAR ACC. [148]TGIF-QA Acc. [53]EgoScheme (test) Acc. [89]MV.Bench Acc. [68]PerceptionTest Acc. (test) [105]
LLaVA-OV 7B [66]SigLIP-so400M81.480.086.768.890.179.977.369.689.283.579.555.770.7112.163.857.766.077.265.257.158.1
Gemma3 12B [132]SigLIP-so400M-75.787.164.9--67.7--71.6----------54.9
Qwen2 VL 7B [144]DFN-H86.683.694.576.591.780.983.667.988.383.893.779.9102.598.767.762.967.381.865.461.666.9
InternVL 2.5 8B [18]InternViT 2.5-300M87.084.693.077.692.879.979.369.290.680.6113.096.5125.8116.772.960.677.691.366.272.668.9
InternVL 3 8B [168]InternViT 2.5-300M87.286.692.776.892.6-80.2-91.1------66.3---75.4-
PLM-8BPElangG88.485.594.680.992.782.986.569.689.985.6127.4105.6146.7129.977.958.384.995.568.877.182.7
", + "bbox": [ + 111, + 762, + 890, + 883 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 12 MLLM System-Level Comparison. We show a system-level comparison between PLM-8B based on $\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}$ and popular open-access models of similar LLM scale using existing encoders. We report test set results where specified.", + "bbox": [ + 109, + 885, + 887, + 916 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 936, + 509, + 949 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5 Perception Encoder: Spatial Alignment", + "text_level": 1, + "bbox": [ + 109, + 80, + 535, + 99 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While language alignment with a pretrained LLM decoder is well-established, the best way to spatially align a model is not obvious. As shown in §3, $\\mathrm{PE}_{\\mathrm{core}}$ already has features that perform well for spatial tasks. However, the layer that performs the best for higher level spatial tasks like detection or depth estimation (layer $\\sim 40$ ) is vastly different than the layer that performs the best for a pure spatial task like tracking (layer $\\sim 30$ ). While we were able to ignore this disparity during language alignment by aligning to an LLM decoder that could do all tasks, classical spatial tasks have decoders that come in all shapes and sizes. It would be impractical to simply align the model using all downstream decoders mirroring language alignment. 
Thus, we must first answer the question, what is happening in the features at those layers to make them useful for spatial tasks?", + "bbox": [ + 107, + 112, + 888, + 234 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.1 Core Feature Analysis", + "text_level": 1, + "bbox": [ + 109, + 253, + 338, + 268 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We begin by analyzing the spatial properties of the features for $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ in the range of layers where it performed optimally for zero-shot tracking in §3. In Fig. 14, we plot (1) the pairwise feature cosine similarity between the pink token and all others, (2) the head average attention map for that token, and (3) the full attention matrix $(HW\\times HW)$ .", + "bbox": [ + 109, + 277, + 343, + 445 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "An 18 Layer Decoder. Remarkably, the cause for the tracking performance peak at layer 32 is abundantly clear from observing", + "bbox": [ + 109, + 455, + 343, + 515 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg", + "image_caption": [ + "Figure 14 $\\mathsf{PE}_{\\mathrm{core}}\\mathsf{G}$ Feature Analysis. To understand the dichotomy between optimal $\\mathsf{PE}_{\\mathrm{core}}$ features for spatial tasks observed in Fig. 8, we analyze the spatial properties of the features between layers 30 and 34." + ], + "image_footnote": [], + "bbox": [ + 383, + 257, + 887, + 455 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "the visualizations. Up until layer 32, the attention maps remain local. However, that changes abruptly at layer 33, at which point several tokens in the background of the image become \"global\" tokens. As shown by the vertical lines in the full attention matrix, starting from layer 33 every token attends to them. 
Thus, every layer 33 and up become part of a decoder for global information.", + "bbox": [ + 107, + 515, + 887, + 575 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This is not a new phenomenon. Recent work [23] shows this happening in all modern vision transformers above L scale. But notably these \"global tokens\" are not necessarily harmful. Given the optimal layer for most tasks in Fig. 8 lies within the global token region, the information they aggregate is useful downstream. However, tracking in §3 is zero-shot and relies purely on spatial correspondences, meaning it cannot make use of the global tokens. This explains why tracking peaks right before their introduction, while tasks that rely on semantic understanding or have larger decoders that can benefit from them do well with the later layers.", + "bbox": [ + 107, + 583, + 887, + 676 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.2 Spatial Alignment Method", + "text_level": 1, + "bbox": [ + 109, + 693, + 375, + 709 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Given the analysis in §5.1, we have two objectives in creating a spatial alignment method: (1) we must preserve the optimal semantic information of the model (including the global tokens) that peaks around layer 40, and (2) we must do so while emphasizing local alignment in service of spatial tasks with shallow decoders. The first can be easily achieved by aligning with the model's own features (e.g., with MaskFeat [147]), but the second is more challenging. To accomplish this, we employ the Segment Anything Model (SAM) 2.1 [111] in a novel way to enforce spatial correspondence information in PE.", + "bbox": [ + 107, + 717, + 887, + 809 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Retaining Semantics. To retain the strong semantic features from $\\mathrm{PE}_{\\mathrm{core}}$ , we finetune the model with itself as a teacher. 
Specifically, we train the model to minimize the cosine similarity between its last layer and the frozen layer 41 features of its initialization (a layer around the peak for many tasks in Fig. 8). On its own this would be a tautology, so we apply heavy regularization to the student: DropPath [50] and LayerScale [135] similar to language alignment, as well as performing MaskFeat [147] with $75\\%$ masking. We keep the teacher", + "bbox": [ + 107, + 820, + 887, + 897 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 936, + 509, + 949 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "fixed in contrast to other state-of-the-art spatial models, which all employ an EMA teacher [98, 138]. This could potentially help, but we opt for simplicity.", + "bbox": [ + 109, + 80, + 887, + 112 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Encouraging Locality. While we could \"retain\" locality by self-distilling from layer 32 features, that may be less effective as we are already distilling another layer of the model. Instead, we turn to a model that is explicitly tuned for locality: SAM [58, 111]. Notably, several works [110, 116, 119] have shown SAM to not be an effective teacher when distilling from multiple sources (though recently [45] has shown it can help with some tricks). However, upon observation of the raw features of SAM 2.1-L (Fig. 15), the main problem may be the same one we are currently trying to solve: SAM has global tokens as well! In this case,", + "bbox": [ + 109, + 123, + 490, + 304 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg", + "image_caption": [ + "Figure 15 SAM 2.1 Feature Similarity. The cosine similarity between the pink marked token and all others for SAM 2.1-L [111] features vs. our proposed mask logit features." 
+ ], + "image_footnote": [], + "bbox": [ + 526, + 114, + 887, + 244 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "they appear as dark spots in a grid-like arrangement across all examples in Fig. 15 raw features.", + "bbox": [ + 109, + 304, + 803, + 319 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Using the features of a model that itself has global tokens to mitigate the effect of global tokens is dubious at best. But, we don't have to use SAM's features to learn locality. At its core, SAM is a model that transforms points into spatially contiguous masks of select object. If what we want is smooth, locally consistent features, we can use the mask predictions themselves. Specifically, we query SAM 2.1-L with 1024 points arranged in a $32 \\times 32$ grid. For each point, SAM returns a $H \\times W$ mask logit the size of the image, which it normally would threshold and NMS. However, we instead concatenate those logits into a $H \\times W \\times 1024$ tensor and use that as the feature map for alignment. This explicitly produces locally well-aligned features compared to the underlying feature space and has no spatial artifacts caused by global tokens, as shown in Fig. 15.", + "bbox": [ + 109, + 325, + 887, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Then to align, we distill the spatial correspondences between tokens by computing their pairwise cosine similarity for both the student and the teacher (creating a $HW \\times HW$ matrix for each) and aligning them with MSE loss. Unlike SAM's underlying feature space (which [45] shows may be brittle to interpolation), the mask logit features are robust to interpolation, so we simply interpolate them down and train at the $\\mathrm{PE}_{\\mathrm{core}}$ model's original 448px resolution. Finally, like for self-distillation we add the same masking and regularization. 
For both teachers, we apply loss to all tokens and add no extra parameters other than LayerScale.", + "bbox": [ + 109, + 455, + 887, + 546 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Effects. Again, the goal of alignment is to lift the strong features already learned by the core model as shown in §3. Thus, like we did for language alignment in §4.1, we perform layerwise frozen feature analysis on spatial tasks in Fig. 16. This time, we evaluate the original $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ checkpoint as well $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ aligned to its own layer 41, to SAM 2.1 mask logits, and finally both. We denote aligning to both as $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ .", + "bbox": [ + 109, + 556, + 486, + 679 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Aligning purely based on the original model's layer 41 features performs well on detection, depth, and semantic segmentation, but falls short for zero-shot tracking, where precise locality is necessary to define boundaries between objects. In contrast, aligning to SAM 2.1 mask logits lowers last layer performance on every task except for tracking, where it significantly improves performance. Understandably, this is because the mask logits have little semantics (see Fig. 17). Thus, the optimal approach is to combine both teachers. As a result, $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ not only lifts the features for all tasks to the end of the network, but it also improves over self-alignment alone. 
Notably, $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ s tracking performance is lower than", + "bbox": [ + 109, + 686, + 488, + 897 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "the SAM-aligned model, but it is still ahead of other methods while being a generally good model, see §5.3.", + "bbox": [ + 109, + 897, + 880, + 912 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 553, + 694, + 672 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 553, + 875, + 672 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg", + "image_caption": [ + "Figure 16 Spatial Alignment. We analyze how our two spatial alignment methods individually change the internal features of $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ . Then we combine both alignment methods to create $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ (see Appendix B.3.1)." + ], + "image_footnote": [], + "bbox": [ + 513, + 676, + 694, + 824 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 676, + 875, + 824 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Last Layer Feature Visualization. In Fig. 17, we visualize the last layer features for the $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ and the 3 aligned models, with similar colors denoting similar features. 
In the first column, we see why the last layer performance of $\\mathrm{PE}_{\\mathrm{core}}$ is so poor: while the last layer features contain information about the salient objects, they seem to have lost spatial coherence. Aligning to the model's own layer 41 features fixes this, but its spatial quality is lacking. In contrast, the model aligned to SAM 2.1 mask logits has locally clear features, but without semantics (similar objects have dissimilar features, see row 1 cats and row 2 cows). $\\mathrm{PE}_{\\mathrm{spatial}}$ using both teachers at once, retains the semantics of $\\mathrm{PE}_{\\mathrm{core}}$ while producing high quality spatial features.", + "bbox": [ + 109, + 80, + 473, + 308 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg", + "image_caption": [ + "Figure 17 Last Layer Visualization for the models in Fig. 16 using 3 dimensional PCA to map features to LCh color space (see Appendix B.3.2). More examples in Appendix C.5." + ], + "image_footnote": [], + "bbox": [ + 503, + 69, + 874, + 262 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/a5560ec88cc71a74991ee5cc6e041c018b9aaa38f6f3e9e3aff01f61f4f5de3c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
EncoderParamsResolutionTrackingSegmentationDepth
DAVIS (↑) [104]ADE20k (↑) [167]NYU (↓) [123]
BestLastIdxBestLastIdxBestLastIdx
OAI CLIP-L [106]0.3B224/1439.437.117/2439.438.319/24.366.39719/24
AIMv2-3B [37]2.7B448/1454.729.313/2441.631.920/24.311.32616/24
SigLIP-so [160]0.4B384/1448.736.316/2740.138.322/27.339.36921/27
SigLIP2-so [138]0.4B512/1651.445.315/2744.042.924/27.306.32925/27
SigLIP2-g-opt [138]1.1B384/1643.538.832/4042.141.334/40.302.32434/40
DINOv2-L [98]0.3B448/1458.758.223/2447.347.324/24.297.30823/24
DINOv2-g [98]1.1B448/1458.558.540/4048.748.437/40.279.29027/40
PEcoreG1.9B448/1456.842.832/5041.538.644/50.249.30939/50
PEspatialG1.9B448/1461.561.550/5049.348.949/50.262.27546/50
", + "bbox": [ + 114, + 330, + 531, + 458 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/09866ed878a98816d1da5b6fe224d8be814c643218717faa1346ddba0c7367a8.jpg", + "table_caption": [ + "Table 13 Frozen Feature Dense Prediction including zero-shot tracking, semantic segmentation and depth estimation. We report best and last layer performance, along with which layer was best for each model. See Appendix B.3.3 for experimental settings." + ], + "table_footnote": [], + "table_body": "
EncoderParamsPretrain ResolutionLVIS [41]COCO [76]
APboxAPmaskAPboxAPmask
OAI CLIP-L [106]0.3B224/1445.041.954.047.5
MetaCLIP-G [152]1.8B224/1445.141.953.246.7
SigLIP-so [160]0.4B224/1445.041.954.447.6
MAE-L [44]0.3B224/1446.143.955.649.3
EVA02-L [35]0.3B224/1449.345.254.948.2
SigLIP2-so [138]0.4B512/1649.345.656.049.4
SigLIP2-g-opt [138]1.1B384/1652.948.557.150.2
DINOv2-L [98]0.3B518/1446.743.555.749.0
DINOv2-g [98]1.1B518/1451.547.357.250.0
PEcoreG1.9B448/1451.947.957.049.8
PEspatialG1.9B448/1454.249.357.850.3
", + "bbox": [ + 550, + 330, + 879, + 468 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5.3 Comparisons with Existing Vision Encoders", + "text_level": 1, + "bbox": [ + 109, + 539, + 511, + 556 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Frozen Feature Dense Prediction. In Tab. 13, we compare different vision encoder's frozen features on three dense prediction tasks: DAVIS tracking [104] (J&F) following the training-free setting from [52, 107], ADE20k semantic segmentation [167] (mIoU) linear probing, and NYU depth estimation [123] (RMSE) with a DPT head [109]. For each model, we report both its best layer and last layer performance. Across the board, $\\mathrm{PE}_{\\mathrm{spatial}}$ performs outperforms other state-of-the-art spatial models, with its best features being much better aligned to the last layer than the $\\mathrm{PE}_{\\mathrm{core}}$ it started from. Notably, SigLIP2, which during pretraining combines spatial, captioning, and contrastive losses [138] is not aligned well to the last layer in comparison.", + "bbox": [ + 107, + 568, + 887, + 676 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "End-to-End Finetuning Detection and Segmentation. In Tab. 14, we compare $\\mathrm{PE}_{\\mathrm{core}}$ and $\\mathrm{PE}_{\\mathrm{spatial}}$ with other popular vision encoders in the standard full-finetuning ViTDet [72] Mask-RCNN [43] setting using COCO [76] and LVIS [41] as benchmarks. In this controlled experiment, $\\mathrm{PE}_{\\mathrm{spatial}}$ is state-of-the-art among various vision backbones. This is significant, as contrastive encoders (especially large ones like MetaCLIP-G [152]) usually perform very poorly on detection, with smaller models often performing better. Typically, encoders only scale for detection if using spatial pretraining or a significant amount of detection data [98] is used to align them directly to downstream tasks. 
In contrast, $\\mathrm{PE}_{\\mathrm{spatial}}$ uses no detection data for alignment, making it general.", + "bbox": [ + 107, + 686, + 887, + 792 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "System-Level Detection. In Tab. 15, we provide a system-level end-to-end finetuning comparison vs. the absolute state-of-the-art in COCO detection. With only Object365 [120] as extra detection data, $\\mathrm{PE}_{\\mathrm{spatial}}$ can match the performance of more complex models tuned for detection, while only using a simple DETR-style decoder [12, 99]. $\\mathrm{PE}_{\\mathrm{spatial}}$ marks the first general, contrastively pretrained model to accomplish this.", + "bbox": [ + 109, + 803, + 550, + 910 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/8a7f45e906b49ad1978fa1578eb35d9ff55a0a97c4468b17d2db66e85fd3b4a2.jpg", + "table_caption": [ + "Table 14 End-to-End Finetuning Detection and Segmentation using Mask R-CNN [43] and VitDet [72] in a controlled setting. Details in Appendix B.3.4." + ], + "table_footnote": [], + "table_body": "
EncoderParamsDetectorCOCO APbox
SwinV2-G [80]3.0BHTC++ [14]62.5
Swin-L [79]0.3BDINO [161]63.2
EVA02-L [35]0.3BCascade [11]64.1
InternImage-G [145]3.0BDINO [161]65.3
EVA02-L [35]0.3BCoDETR [169]65.9
PEspatialG1.9BDETA [99]66.0
", + "bbox": [ + 581, + 801, + 874, + 878 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 15 System-Level Comparison on Detection. Comparing to the leading results on COCO [76] val2017. See Appendix B.3.5 for training recipe.", + "bbox": [ + 571, + 881, + 885, + 922 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "6 Related Work", + "text_level": 1, + "bbox": [ + 111, + 80, + 282, + 97 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Learning vision-semantic representations has long been the leading approach for developing foundational models in perception. By aligning visual and textual representations, these models excel not only in vision tasks such as zero-shot image classification and image-text retrieval [51, 106, 117], open-vocabulary detection [63, 94, 95] and segmentation [22, 28], but also serve as the basis for multi-modal large language models (MLLMs) [3, 5, 78, 93, 101, 134].", + "bbox": [ + 109, + 113, + 883, + 189 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Contrastive Language-Image Pretraining. The early works of Virtex [27], ICMLM [115], and ConViRT [163] developed the techniques for learning through contrastive objectives between vision and language modalities. Subsequently, vision encoders such as CLIP [51, 106] and ALIGN [54] scaled these techniques to much larger datasets and model sizes, popularizing vision-language contrastive learning. A series of open-weight contrastive models have been developed to enhance the performance and robustness of CLIP [33, 71, 117, 129, 152, 160]. For instance, SigLIP [160] replaces the traditional softmax with a sigmoid function in contrastive learning, while FLIP [74] employs masking techniques to expedite the training process. We are among this effort and build a state-of-the-art open Perception Encoder (PE) (§2.1). 
Other objectives that have proven useful for building visual encoders include captioning loss, which learns to predict image descriptions using a language model decoder and transfers well to downstream multi-modal language modeling tasks [37, 137]. Many works are now attempting to combine two or more objectives to address different downstream tasks through pretraining with multiple objectives [37, 158] or training sequentially [19, 66].", + "bbox": [ + 109, + 200, + 885, + 383 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Efficient Training. Various axes of efficient training of clip models have been explored. BASIC [102] and LAION [117] explored scaling the batch size up to 160K, and shows the benefits of large batch sizes during training. EVA-CLIP [130] uses LAMB optimizer [156] for large batch training of clip models. Rotary positional embedding (RoPE) [127] has been successfully adopted in large language models. In vision transformers [2, 48] adopted 2D rotatory positional embeddings. For data engine, a series of works focus on large-scale sourcing and filtering through efficient data curation [33, 39, 117, 152] and explore recaptioning training images using MLLMs or VLMs [32, 64, 96, 151]. We extend these concepts to build a video data engine and scale our model to function as one strong model for both image and video (§2.2).", + "bbox": [ + 109, + 393, + 883, + 513 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Best Embedding Layer Inside the Network. Typically, most vision encoders rely on the last layer to extract features for the task it is trained on. However, when trained on proxy or self-supervised tasks, the last layer is often not the ideal candidate for other tasks [8, 15, 16, 30, 85, 107, 121, 128, 142, 159, 166]. For example, when using image colorization as pretraining objective, [162, 166] showed that the middle layers were better at image classification compared to last layers. 
Subsequently, in iGPT [15], when trained for next token prediction, intermediate layers performed better at image classification. AIMv1 [30] also showed similar behavior for image based next token prediction with patch normalized MSE loss. Toto [107] showed this can be extended for next token prediction in videos, and intermediate layers are best for image classification, video classification, tracking and robotics. REPA [159] showed this behavior for image generation models, where the intermediate layers of SiT [85] has better linear probing accuracy compared to earlier or later layers. In CLIP models, CLIPer [128] identified that early layers in CLIP possess good spatial understanding. In contrast to these lines of work, in this paper, we first show this behavior is not limited to one class of encoders. Specifically, we show this behavior exists in a spatially self-supervised model [98], generative captioning model [37], and also in our own PE. Then we study this behavior for PE encoder in depth, and show it is possible for CLIP training to produce rich spatial and semantic features in intermediate layers (§3).", + "bbox": [ + 109, + 527, + 885, + 753 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Alignment Tuning. We explore alignment tuning for language (§4) and for spatial understanding (§5). For language alignment, we focus on adapting to multimodal large language models (MLLMs); for spatial alignment, we employ self-distillation of the models own features combined with a teacher for locality. In MLLM literature, midtraining—i.e., a middle stage of training used to exploit large-scale multimodal data—has been actively studied. LLaVA-OneVision [66], InternVL series [18, 19], QwenVL series [3, 144], and several other leading MLLMs [82, 132] adopt this paradigm. 
Our $\\mathrm{PE}_{\\mathrm{lang}}$ can be seen as a variant of midtraining, but with one critical difference in principle: our goal is not to build the best MLLM, but to make the vision encoder the most general. Throughout §4, we benchmark our $\\mathrm{PE}_{\\mathrm{lang}}$ across different language models, input resolution, on various tasks for image and video to show this generality. For spatial tasks, we utilize the hidden embeddings", + "bbox": [ + 109, + 765, + 883, + 902 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "in the intermediate layers. Recently, several works showed the effectiveness of distilling teacher model via representation alignment with cosine similarity. REPA [159] distilled an early layer features of DINO for image diffusion models, RADIO [110] used multi-teacher distillation (DINO, CLIP and SAM). The key idea is to borrow semantic understanding (e.g., CLIP) and spatial understanding (e.g., SAM, DINO) of a pretrained vision encoders. In our $\\mathrm{PE}_{\\mathrm{spatial}}$ , we exploit the intermediate features of $\\mathrm{PE}_{\\mathrm{core}}$ for semantics, and a novel way to use SAM for spatial understanding.", + "bbox": [ + 109, + 80, + 887, + 174 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 109, + 194, + 261, + 212 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We have presented Perception Encoders (PE), a family of best-in-class foundation models comprising $\\mathrm{PE}_{\\mathrm{core}}$ , $\\mathrm{PE}_{\\mathrm{lang}}$ , and $\\mathrm{PE}_{\\mathrm{spatial}}$ . We have shown that $\\mathrm{PE}_{\\mathrm{core}}$ can outperform models trained with WebLI and JFT-3B, which were previously the undisputed leaders in zero-shot image recognition, while also excelling in zero-shot video recognition. 
We have demonstrated that $\\mathrm{PE}_{\\mathrm{lang}}$ can be used to build a multimodal language model [21] that is at the forefront of the field in terms of performance. We have established that $\\mathrm{PE}_{\\mathrm{spatial}}$ can match the long-standing state-of-the-art in object detection with a significantly simpler decoder. Throughout all of this, one conclusion is abundantly clear: Perception Encoder unlocks the potential to scale simple contrastive vision-language pretraining to address a wide range of downstream vision tasks.", + "bbox": [ + 109, + 227, + 888, + 352 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Additional Contributors and Acknowledgments. We would like to thank Abhimanyu Dubey, Adel Ahmadyan, Andrew Westbury, Arkabandhu Chowdhury, Azita Shokrpour, Babak Damavandi, Chay Ryali, Cyprien de Lichy, Didac Suris Coll-Vinent, Dong Wang, Filip Radenovic, George Orlin, Han Zou, Harry Tran, Jitendra Malik, Joelle Pineau, Joseph Greer, Kavya Srinet, Kirmani Ahmed, Laura Gustafson, Lu Zhang, Muhammad Maaz, Natalia Neverova, Nicolas Carion, Oleksandr Maksymets, Ramya Raghavendra, Romy Luo, Ronghang Hu, Sam Doud, Sasha Mitts, Sean Bell, Shane Moon, Shuming Hu, Soerian Lieve, Stephane Kasriel, Valentin Gabeur, Vanessa Stark, Vignesh Ramanathan, Vivian Lee, Xuan Hu, Yang Li, and Ziyang Wang for their contributions and support for the project. 
And we thank you, the reader, for reading this far.", + "bbox": [ + 109, + 359, + 888, + 484 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A Video Data Engine", + "text_level": 1, + "bbox": [ + 109, + 80, + 333, + 99 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.1 Video Caption", + "text_level": 1, + "bbox": [ + 109, + 114, + 274, + 132 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "LLM Summarization prompt", + "text_level": 1, + "bbox": [ + 109, + 143, + 308, + 159 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "LLM Summarization prompt 72 tokens", + "text_level": 1, + "bbox": [ + 117, + 170, + 346, + 181 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Create a concise caption of a video using the provided metadata, video caption, and frame captions.", + "bbox": [ + 117, + 185, + 766, + 196 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "TASK: Extract key information from the captions and combine it into an alt text format using single phrase or set of phrases that includes all relevant details.", + "bbox": [ + 120, + 198, + 875, + 220 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Steps to Follow:", + "bbox": [ + 120, + 220, + 227, + 233 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Review the metadata (title and description) for general context, you can rely it for entity names but do not rely on it as the primary source of information for your caption.", + "2 . Blend title / description with video caption and frame captions for the main storyline", + "3. Extract the most relevant and concise information.", + "4. Combine extracted information into a alt text format using short phrase or set of phrases with approximately 120 tokens, considering special characters like comma as part of the token count.", + "5. 
Prioritize including all key information over sentence structure or grammar.", + "6. Minimize the use of special characters and focus of key information." + ], + "bbox": [ + 120, + 233, + 875, + 327 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "What to Avoid:", + "bbox": [ + 120, + 329, + 215, + 339 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Avoid adding or inferring information not present in the original metadata and captions.", + "- Avoid using complex sentence structures or prioritizing sentence flow." + ], + "bbox": [ + 120, + 340, + 705, + 364 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Create a concise caption of the video based on the metadata, video caption, and frame captions.", + "bbox": [ + 120, + 366, + 741, + 376 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.2 PE Video Dataset Details", + "text_level": 1, + "bbox": [ + 109, + 398, + 366, + 412 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "PE Video is a dataset that we collected and curated from a licensed data source. The videos are high-resolution and high-quality with a focus on motion. The total number of videos is 1M. 
Among these, 120K videos have human-refined video captions, and we selected 15K from the 120K videos as a benchmark.", + "bbox": [ + 109, + 422, + 885, + 467 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.2.1 Video Data Filtering Pipeline", + "text_level": 1, + "bbox": [ + 109, + 484, + 359, + 500 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The goal of video data filtering is to identify videos that contain motions such as object motion, camera motion, interaction between objects, human actions, sequences of actions, and manipulation of objects, while rejecting videos with static scenes, like landscapes, or those that are artificial or highly edited.", + "bbox": [ + 109, + 508, + 885, + 555 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To achieve this, we created a video filtering pipeline consisting of the following steps:", + "bbox": [ + 109, + 561, + 720, + 577 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Step1: Compute motion features. For each video, we compute a list of features from video frames, including frames per second (fps), number of frames, number of I-frames, motion vector magnitude, and motion vector variance, using off-the-shelf tools like OpenCV [10].", + "Step 2: Extract video frame features. For each video, we uniformly sample three frames and encode them using a DINOv2 model [98] and a SigLIP model [160].", + "Step 3: LLM Features. For each video, we also run a multimodal large language model (LLM) like LlamaOnevision QwenLM 2 0.5B [66] to extract MLLM features. We composed a list of 26 questions and performed MLLM inference on the videos. The questions can be found here in §A.2.2.", + "Step 4: Video Quality Scoring. We combine all the features collected so far and use a random forest model to predict a score between 0 and 5. To train the model, we manually annotated approximately 1,000 videos with scores between 0 and 5. 
A low score indicates that the video is almost static and can be nearly summarized by a single frame, while a high score indicates that there are multiple temporal events in the video, requiring several frames to accurately caption it. We use these annotated videos as training data to fit a random forest model for video quality score prediction.", + "Step 5: We apply k-means clustering to the videos and rank them within each cluster. By selecting the top-ranked videos from each cluster, we effectively reduce the number of duplicated videos in the final dataset." + ], + "bbox": [ + 109, + 588, + 885, + 878 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.2.2 LLM Feature Extraction", + "text_level": 1, + "bbox": [ + 111, + 80, + 330, + 94 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "LLM Feature extraction question list", + "text_level": 1, + "bbox": [ + 119, + 108, + 359, + 119 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the camera capturing the scene static? Reply yes or no.", + "bbox": [ + 119, + 123, + 503, + 135 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the camera capturing the scene moving? Reply yes or no.", + "bbox": [ + 120, + 136, + 501, + 146 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the video capturing a landscape? Reply yes or no.", + "bbox": [ + 120, + 147, + 462, + 157 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the video capturing a static scene? Reply yes or no.", + "bbox": [ + 120, + 159, + 482, + 170 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the scene captured from a distance? Reply yes or no.", + "bbox": [ + 120, + 171, + 480, + 181 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the video captured with a drone? 
Reply yes or no.", + "bbox": [ + 120, + 183, + 462, + 194 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the video computer-generated? Reply yes or no.", + "bbox": [ + 120, + 195, + 442, + 205 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the video content abstract? Reply yes or no.", + "bbox": [ + 120, + 207, + 429, + 218 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is there something moving through the scene? Reply yes or no.", + "bbox": [ + 120, + 219, + 522, + 229 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is there someone doing something in the video? Reply yes or no.", + "bbox": [ + 120, + 231, + 534, + 242 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there several things moving in the video? Reply yes or no.", + "bbox": [ + 120, + 243, + 527, + 253 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is there an object that is being manipulated? Reply yes or no.", + "bbox": [ + 120, + 255, + 527, + 266 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there animals in the video? Reply yes or no.", + "bbox": [ + 120, + 267, + 436, + 277 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is the scene mostly static? Reply yes or no.", + "bbox": [ + 120, + 279, + 410, + 290 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are things occluding each other in this video? Reply yes or no.", + "bbox": [ + 120, + 291, + 532, + 301 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is there something obstructing the view apart from the watermark? Reply yes or no.", + "bbox": [ + 120, + 303, + 656, + 314 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is there a large number of things in the video? Reply yes or no.", + "bbox": [ + 120, + 315, + 540, + 325 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there more than 5 different objects in the video? 
Reply yes or no.", + "bbox": [ + 120, + 327, + 578, + 338 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is it hard to keep track of some entities because they are moving so much? Reply yes or no.", + "bbox": [ + 120, + 339, + 715, + 349 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Is someone looking at a phone, a tablet or a computer screen? Reply yes or no.", + "bbox": [ + 120, + 351, + 630, + 361 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are they looking at a phone, a tablet or a computer screen during the whole video? Reply yes or no.", + "bbox": [ + 120, + 363, + 767, + 373 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there several moving persons in this video? Reply yes or no.", + "bbox": [ + 120, + 375, + 540, + 386 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there several moving animals in this video? Reply yes or no.", + "bbox": [ + 120, + 387, + 540, + 398 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there several objects in this video? Reply yes or no.", + "bbox": [ + 120, + 398, + 493, + 409 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Are there several similar-looking objects in the video? Reply yes or no.", + "bbox": [ + 120, + 411, + 591, + 421 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Do they look similar? Reply yes or no.", + "bbox": [ + 120, + 422, + 370, + 433 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We use LLaVA-OneVision [78] model to extract LLM features from the videos. 
For each video, we prompt with 26 different questions to extract features ranging from, \"is the video a landscape video?\" to, \"are there any moving objects in the video?\" The features are then used by a random forest model to determine the video quality score.", + "bbox": [ + 109, + 443, + 885, + 503 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.2.3 PVD Benchmark Distribution", + "text_level": 1, + "bbox": [ + 109, + 522, + 364, + 535 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/3b9c2fc708c6ae91f43db0079c0a53d5a6bfe209c1b5de951dd81e4a3cdb737b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryNumber of videosAvg. Caption Length
Hand Actions214354.2
Object Interactions186442.6
Food Preparation169156.8
Work Activities168947.8
Outdoor Scenes155850.7
Animals142350.9
Water Scenes133744.6
Object Handling130751.6
Close-up Shots112245.1
Nature Scenes86638.4
", + "bbox": [ + 375, + 551, + 620, + 672 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 16 PVD Benchmark Statistics. We created a dataset of 15K videos together with human-verified captions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes.", + "bbox": [ + 109, + 675, + 885, + 705 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg", + "image_caption": [ + "Category: Hand Actions" + ], + "image_footnote": [], + "bbox": [ + 116, + 150, + 493, + 208 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video captures a closeup shot of person typing on a keyboard. The camera moves from the left side of the keyboard to the right, an animation of the revolving globe and some numbers can be seen in the frame and the video ends.", + "bbox": [ + 127, + 229, + 483, + 251 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg", + "image_caption": [ + "Category: Object Interactions" + ], + "image_footnote": [], + "bbox": [ + 501, + 152, + 880, + 208 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a black and white spiral that is spinning. 
The spiral is made up of alternating black and white stripes that are evenly spaced and symmetrical.", + "bbox": [ + 521, + 232, + 859, + 247 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg", + "image_caption": [ + "Category: Food Preparation" + ], + "image_footnote": [], + "bbox": [ + 116, + 277, + 493, + 338 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a person cutting an green color item into small pieces. They are using a knife to slice the pickle into thin pieces, and then chopping those pieces into smaller cubes. The person is working on a wooden cutting board, and the Hands are visible from the left side of the frame with pink nail paint on their nails.", + "bbox": [ + 122, + 353, + 488, + 382 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg", + "image_caption": [ + "Category: Work Activities" + ], + "image_footnote": [], + "bbox": [ + 503, + 277, + 880, + 338 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a person using a shovel to clean the ashes from a fireplace. They are scooping up the ashes and removing them from the fireplace.", + "bbox": [ + 514, + 359, + 867, + 375 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg", + "image_caption": [ + "Category: Outdoor Scenes" + ], + "image_footnote": [], + "bbox": [ + 116, + 402, + 493, + 468 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a tall, pointed structure in the middle of a field. and the structure is surrounded by trees and other vegetation. The field is divided into sections, with some areas covered in green grass and others covered in white material. 
The video shows the structure and the field from a distance, with the camera moving around it.", + "bbox": [ + 125, + 479, + 483, + 510 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg", + "image_caption": [ + "Category: Animals" + ], + "image_footnote": [], + "bbox": [ + 501, + 406, + 880, + 468 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a white and gray adult cat and two kittens. The adult cat is grooming the kitten closest to it with its tongue, and the kitten is looking around. A hand reaches out from the frame's upper left to pet the two kittens.", + "bbox": [ + 511, + 483, + 870, + 506 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg", + "image_caption": [ + "Category: Water Scenes" + ], + "image_footnote": [], + "bbox": [ + 116, + 532, + 493, + 589 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a large school of fish swimming in a water body towards the right frame. The camera too pans a little to the right.", + "bbox": [ + 133, + 614, + 477, + 631 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg", + "image_caption": [ + "Category: Object Handling" + ], + "image_footnote": [], + "bbox": [ + 501, + 532, + 880, + 589 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a person putting a bowl of something into an oven. The person then closes the oven door. 
The background is blurry.", + "bbox": [ + 514, + 614, + 867, + 631 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg", + "image_caption": [ + "Category: Close-up Shots" + ], + "image_footnote": [], + "bbox": [ + 116, + 666, + 493, + 719 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a white counter with two brown buckets and a yellow bucket. Then a person's right hand wearing a green glove enters the frame from top right side and place a yellow flower near to yellow watering can. The person then places the flower, in front of the buckets and exits the frame. In the background is a brown wall, and the camera is static throughout the clip.", + "bbox": [ + 122, + 736, + 488, + 765 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg", + "image_caption": [ + "Category: Nature Scenes", + "Figure 18 More PE Video Dataset Examples. For each of the ten categories, we randomly pick one video and show its video caption. The captions were generated by our video data pipeline and then refined by human annotators." + ], + "image_footnote": [], + "bbox": [ + 501, + 662, + 880, + 719 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption: The video shows a pile of branches and leaves on fire in a field. The fire is burning brightly, with flames licking at the edges of the pile. 
The smoke from the fire rises into the air, billowing up into the sky.", + "bbox": [ + 514, + 739, + 864, + 762 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "B Implementation Details", + "text_level": 1, + "bbox": [ + 111, + 80, + 382, + 99 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "B.1 PE Core", + "text_level": 1, + "bbox": [ + 111, + 114, + 225, + 130 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We provide additional implementation details for building $\\mathrm{PE}_{\\mathrm{core}}$ . Our implementation is based on OpenCLIP5.", + "bbox": [ + 109, + 141, + 887, + 157 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "B.1.1 Architecture and Training Setups", + "text_level": 1, + "bbox": [ + 109, + 174, + 387, + 190 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Model Architecture. Following CLIP, $\\mathrm{PE}_{\\mathrm{core}}$ comprises a Transformer-based [141] vision and a text encoder. We employ customized Transformer configurations as detailed in Tab. 17. For pooling, we an attention pooling block in the style of SigLIP [160] with 8 heads from the last-layer feature to construct image and video embeddings. Regarding positional embedding, we use 2D RoPE [127] for relative positional embeddings and 2D learnable absolute positional embeddings (abs) the same size as the model's input resolution. We interpolate positional embeddings to enable support for various resolutions beyond the default. The text context length is 72 for G-scale and 32 for B and L-scale models. Originally a bug, we find it optimal to not disable the class token when using attention pooling for smaller models. Thus, the B and L models use a class token, then the attention pooling layer probes all features at once (class token included). 
Finally, we use an input mean and standard deviation of $(0.5,0.5,0.5)$ for simplicity.", + "bbox": [ + 109, + 202, + 888, + 354 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/c293da833f1f864b33c52c0b9a7471a4ecaa95fb54573885bda040013500ef54.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP DimPoolingPositional EmbeddingResolution & Context LenPatch SizeClass Token Register
BVision0.09B768123072121024Attn PoolRoPE+Abs22416
Text0.31B102424409616EOS TokenAbs32--
LVision0.32B1024244096161024Attn PoolRoPE+Abs33614
Text0.31B102424409616EOS TokenAbs32--
GVision1.88B1536508960161280Attn PoolRoPE+Abs44814
Text0.47B128024512020EOS TokenAbs72--
", + "bbox": [ + 189, + 364, + 805, + 452 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "PE Core Training. As discussed in §2.4, the training of $\\mathrm{PE}_{\\mathrm{core}}$ involves three stages: 1) image pretraining; 2) image and video finetuning; and 3) an additional model distillation for smaller models. These three stages work together to develop a robust and effective $\\mathrm{PE}_{\\mathrm{core}}$ model.", + "bbox": [ + 109, + 489, + 885, + 535 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We first provide training recipes for 1) image pretraining in Tab. 18 and 2) video finetuning in Tab. 19.", + "bbox": [ + 109, + 542, + 852, + 558 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/60f27e61231de16ef8490017adbcf2ac7e92d19c25c123c43f3166e7cb26afdf.jpg", + "table_caption": [ + "Table 17 PE Model Configurations with full details." + ], + "table_footnote": [], + "table_body": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate2e-3
batch size131,072
warm-up steps2K
training steps443K (B, L) / 656K (G)
data quantity5.4B
samples seen58B (B, L) / 86B (G)
max logit scale100
mask reg ratio0.4
mask reg batch8192
progressive res112-160-224 (B)
98-154-224-336 (L)
98-154-224-336-448 (G)
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "bbox": [ + 143, + 570, + 339, + 797 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/054f467c3336982f3f223e3b29240981096ae34d8b34df2a325a937eacb719bb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size4096
warm-up steps2K
training steps5.4K
data quantity22M
samples seen22M
max logit scale100
number of frames8
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "bbox": [ + 408, + 570, + 612, + 750 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/ef15fc2bd9455fb292713d568e39c9a6348ca77f140ee0f3585a04410a9ad1b8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size16384
warm-up steps2K
training steps269K
data quantity5.4B
samples seen4.4B
max logit scale100
teacher logit scale200 (§C.3)
data augNone
", + "bbox": [ + 694, + 570, + 839, + 723 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 20 Distillation.", + "bbox": [ + 700, + 727, + 834, + 739 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 19 Video Finetuning.", + "bbox": [ + 424, + 753, + 594, + 767 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 18 Image Pretraining.", + "bbox": [ + 151, + 801, + 326, + 814 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "After training the largest G-scale model, we train the smaller models with image pretraining, then distill with image distillation in Tab. 20, then finally apply video finetuning at the end.", + "bbox": [ + 109, + 830, + 883, + 861 + ], + "page_idx": 24 + }, + { + "type": "page_footnote", + "text": "5https://github.com/mlfoundations/open Clip", + "bbox": [ + 127, + 869, + 418, + 883 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "B.1.2 Zero-Shot Classification and Retrieval", + "text_level": 1, + "bbox": [ + 109, + 80, + 424, + 94 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Zero-Shot Evaluation on Images and Videos. We use CLIPBench for zero-shot classification and retrieval benchmarking. The benchmark datasets and splits are obtained from the original dataset websites or HuggingFace. We extend the CLIPBench zero-shot evaluation to include video datasets such as MSR-VTT and Kinetics, and will release our model checkpoints, evaluation code, and scripts for reproducibility.", + "bbox": [ + 109, + 108, + 885, + 170 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Prompt Design. For zero-shot image-text and video-text retrieval, we rely solely on the original captions without any additional prompts. In contrast, for zero-shot classification, we utilize task-specific prompts graciously provided by the InternVL [19] authors. 
All additional prompts will be released.", + "bbox": [ + 109, + 181, + 887, + 227 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "For example, we employ specific prompts for zero-shot image classification on various ImageNet benchmarks (e.g., ImageNet val, ImageNet v2) and video classification on Kinetics datasets (e.g., K400, K600, K700).", + "bbox": [ + 109, + 233, + 883, + 265 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Zero-Shot Image Classification Prompts - ImageNet", + "text_level": 1, + "bbox": [ + 119, + 273, + 444, + 285 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "a bad photo of a $\\{\\mathbf{c}\\}$ . a photo of many $\\{\\mathbf{c}\\}$ . a sculpture of a $\\{\\mathbf{c}\\}$ . a photo of the hard to see $\\{\\mathbf{c}\\}$ . a low resolution photo of the $\\{\\mathbf{c}\\}$ . a rendering of a $\\{\\mathbf{c}\\}$ . graffiti of a $\\{\\mathbf{c}\\}$ . a bad photo of the $\\{\\mathbf{c}\\}$ . a cropped photo of the $\\{\\mathbf{c}\\}$ . a tattoo of a $\\{\\mathbf{c}\\}$ . the embroidered $\\{\\mathbf{c}\\}$ . a photo of a hard to see $\\{\\mathbf{c}\\}$ . a bright photo of a $\\{\\mathbf{c}\\}$ . a photo of a clean $\\{\\mathbf{c}\\}$ . a photo of a dirty $\\{\\mathbf{c}\\}$ . a dark photo of the $\\{\\mathbf{c}\\}$ . a drawing of a $\\{\\mathbf{c}\\}$ . a photo of my $\\{\\mathbf{c}\\}$ . the plastic $\\{\\mathbf{c}\\}$ . a photo of the cool $\\{\\mathbf{c}\\}$ . a close-up photo of a $\\{\\mathbf{c}\\}$ . a black and white photo of the $\\{\\mathbf{c}\\}$ . a painting of the $\\{\\mathbf{c}\\}$ . a painting of a $\\{\\mathbf{c}\\}$ . a pixelated photo of the $\\{\\mathbf{c}\\}$ . a sculpture of the $\\{\\mathbf{c}\\}$ . a bright photo of the $\\{\\mathbf{c}\\}$ . a cropped photo of a $\\{\\mathbf{c}\\}$ . a plastic $\\{\\mathbf{c}\\}$ . a photo of the dirty $\\{\\mathbf{c}\\}$ . aJPEG corrupted photo of a $\\{\\mathbf{c}\\}$ . a blurry photo of the $\\{\\mathbf{c}\\}$ . a photo of the $\\{\\mathbf{c}\\}$ . 
a good photo of the $\\{\\mathbf{c}\\}$ . a rendering of the $\\{\\mathbf{c}\\}$ . a $\\{\\mathbf{c}\\}$ in a video game. a photo of one $\\{\\mathbf{c}\\}$ . a doodle of a $\\{\\mathbf{c}\\}$ . a close-up photo of the $\\{\\mathbf{c}\\}$ . a photo of a $\\{\\mathbf{c}\\}$ . the origami $\\{\\mathbf{c}\\}$ . the $\\{\\mathbf{c}\\}$ in a video game. a sketch of a $\\{\\mathbf{c}\\}$ . a doodle of the $\\{\\mathbf{c}\\}$ . a origami $\\{\\mathbf{c}\\}$ . a low resolution photo of a $\\{\\mathbf{c}\\}$ . the toy $\\{\\mathbf{c}\\}$ . a rendition of the $\\{\\mathbf{c}\\}$ . a photo of the clean $\\{\\mathbf{c}\\}$ . a photo of a large $\\{\\mathbf{c}\\}$ . a rendition of a $\\{\\mathbf{c}\\}$ . a photo of a nice $\\{\\mathbf{c}\\}$ . a photo of a weird $\\{\\mathbf{c}\\}$ . a blurry photo of a $\\{\\mathbf{c}\\}$ . a cartoon $\\{\\mathbf{c}\\}$ . art of a $\\{\\mathbf{c}\\}$ . a sketch of the $\\{\\mathbf{c}\\}$ . a embroidered $\\{\\mathbf{c}\\}$ . a pixelated photo of a $\\{\\mathbf{c}\\}$ . itap of the $\\{\\mathbf{c}\\}$ . a JPEG corrupted photo of the $\\{\\mathbf{c}\\}$ . a good photo of a $\\{\\mathbf{c}\\}$ . a plushie $\\{\\mathbf{c}\\}$ . a photo of the nice $\\{\\mathbf{c}\\}$ . a photo of the small $\\{\\mathbf{c}\\}$ . a photo of the weird $\\{\\mathbf{c}\\}$ . the cartoon $\\{\\mathbf{c}\\}$ . art of the $\\{\\mathbf{c}\\}$ . a drawing of the $\\{\\mathbf{c}\\}$ . a photo of the large $\\{\\mathbf{c}\\}$ . a black and white photo of a $\\{\\mathbf{c}\\}$ . the plushie $\\{\\mathbf{c}\\}$ . a dark photo of a $\\{\\mathbf{c}\\}$ . itap of a $\\{\\mathbf{c}\\}$ . graffiti of the $\\{\\mathbf{c}\\}$ . 
a toy $\\{\\mathbf{c}\\}.$ itap of my $\\{\\mathbf{c}\\}.$ a photo of a cool $\\{\\mathbf{c}\\}.$ a photo of a small $\\{\\mathbf{c}\\}.$ a tattoo of the $\\{\\mathbf{c}\\}.$", + "bbox": [ + 117, + 289, + 880, + 483 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Zero-Shot Video Classification Prompts - Kinetics", + "text_level": 1, + "bbox": [ + 119, + 494, + 444, + 505 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "a photo of $\\{\\mathbf{c}\\}$ . a photo of a person $\\{\\mathbf{c}\\}$ . a photo of a person using $\\{\\mathbf{c}\\}$ . a photo of a person doing $\\{\\mathbf{c}\\}$ . a photo of a person during $\\{\\mathbf{c}\\}$ . a photo of a person performing $\\{\\mathbf{c}\\}$ . a photo of a person practicing $\\{\\mathbf{c}\\}$ . a video of $\\{\\mathbf{c}\\}$ . a video of a person using $\\{\\mathbf{c}\\}$ . a video of a person doing $\\{\\mathbf{c}\\}$ . a video of a person during $\\{\\mathbf{c}\\}$ . a video of a person performing $\\{\\mathbf{c}\\}$ . a video of a person practicing $\\{\\mathbf{c}\\}$ . a example of $\\{\\mathbf{c}\\}$ . a example of a person $\\{\\mathbf{c}\\}$ . a example of a person using $\\{\\mathbf{c}\\}$ . a example of a person doing $\\{\\mathbf{c}\\}$ . a example of a person during $\\{\\mathbf{c}\\}$ . a example of a person performing $\\{\\mathbf{c}\\}$ . a example of a person practicing $\\{\\mathbf{c}\\}$ . a demonstration of $\\{\\mathbf{c}\\}$ . a demonstration of a person $\\{\\mathbf{c}\\}$ . a demonstration of a person using $\\{\\mathbf{c}\\}$ . a demonstration of a person doing $\\{\\mathbf{c}\\}$ . a demonstration of a person during $\\{\\mathbf{c}\\}$ . a demonstration of a person performing $\\{\\mathbf{c}\\}$ .", + "bbox": [ + 117, + 508, + 879, + 606 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Evaluation Method. Several works use different input transformations for different datasets when evaluating zero-shot performance (e.g., [33, 130, 138, 160]). 
To be as fair as possible, we follow [130] in evaluating with two transformations—center crop and non aspect ratio preserving resize (\"squash\")—and report the max between the two for all models and all datasets we evaluate. Additionally, ObjectNet has a red border around every image to facilitate dedduplication, which we remove for evaluation. Finally, we follow [19] in using retrieval reweighting (DSL), applying the softmax score distribution to the similarities used for retrieval:", + "bbox": [ + 109, + 619, + 887, + 710 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s c o r e s} = \\text {s c o r e s} * \\text {s o f t m a x} (\\text {s c o r e s}, \\dim = 0) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 720, + 885, + 736 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "This slightly improves retrieval for most models, so we do it for all models we evaluate for fairness. Notably, we were able to reproduce the reported numbers for most papers with these techniques, but for cases where we could not, we default to the reported number.", + "bbox": [ + 109, + 746, + 887, + 792 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "B.2 PE: Language Alignment", + "text_level": 1, + "bbox": [ + 109, + 810, + 362, + 825 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We provide details of the MLLM experimental setup in $\\S 4$ . We describe data, model, and training separately.", + "bbox": [ + 109, + 834, + 887, + 851 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Data. Our MLLM training contains warmup data and supervised finetuning (SFT) data. Our warmup data is a 1M subset image-text pairs of our $\\mathrm{PE}_{\\mathrm{core}}$ pretraining dataset. 
For SFT data, we use a diverse data", + "bbox": [ + 109, + 861, + 887, + 892 + ], + "page_idx": 25 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/LAION-AI/CLIP_benchmark", + "bbox": [ + 127, + 898, + 444, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "mix consisting of 2.6M unique samples. This dataset is composed of $1.7\\mathrm{M}^7$ visual QAs samples from the Cauldron [65], 0.5M grounded QA pairs from Visual Genome [60], Flickr-Entities [103] and Densely Captioned Images [139], 0.1M image-captioning pairs from COCO [76] and 0.3M text-only samples. This comprehensive data mix allows us to thoroughly assess our model's capabilities in various MLLM tasks.", + "bbox": [ + 109, + 80, + 887, + 141 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Model. As described in § 4.1, we use a simple vision-language model architecture where a vision encoder and a pretrained decoder-only LLM are connected by a vision projector. For all tables, we use either Llama3.1-instruct 8B or QwenLM 2.5-instruct 7B as a language model, and 2-layer MLP as a vision projector. For fair comparison, we use the native resolution for image input. During inference, we evaluate the models on video tasks in zero-shot manner: We concatenate all video frames into a sequence and feed to language model, without seeing video samples during SFT. For all video tasks, we use 8 frames with the same native resolution of height and width. For $\\mathrm{PE}_{\\mathrm{core}}$ and $\\mathrm{PE}_{\\mathrm{lang}}$ , this makes $448 \\times 448 \\times 8$ input and $32 \\times 32 \\times 8$ vision tokens.", + "bbox": [ + 109, + 152, + 887, + 261 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Training. MLLM training consists of warmup and supervised finetuning (SFT) stages. 
In both stages, we freeze vision encoder and train vision projector and LLM. During warmup stage, we use a global batch size of 128 with a learning rate of $1 \\times 10^{-4}$ . We gradually increase the learning rate from $1 \\times 10^{-6}$ to $1 \\times 10^{-4}$ over 120 steps, and follow a cosine learning rate decay schedule to train a total of 8,000 steps. During SFT stage, we use a global batch size 256 with a learning rate of $1 \\times 10^{-5}$ . Similar to the warmup, we gradually increase the learning rate from $1 \\times 10^{-7}$ to $1 \\times 10^{-5}$ over 300 steps, and follow a cosine learning rate decay schedule to train a total of 12.5K steps. We truncate text-sequences longer than 2,048 tokens on top the visual tokens. This makes the maximum sequence length to be (num. vision tokens) + 2,048. With $448 \\times 448$ input resolution and patch size of 14, we set the maximum sequence length to $1,024 + 2,048 = 3,072$ . To represent bounding boxes on output side for image grounding tasks, we simply use text tokens to represent each bounding box: each coordinate is normalized between 000 and 999, in “[x, y, x, y]” box format for top-left and bottom-right corners (e.g., [012, 122, 633, 782]).", + "bbox": [ + 109, + 271, + 888, + 454 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "For all baselines, we search for the best intermediate layer features to adapt to LLM. 
We search over $\\{-1, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -40\\}$ layers (counting from last) and report the best result in average over OCR/Chart/Document Q&A, Visual Q&A, Image Captioning and Video Understanding.", + "bbox": [ + 109, + 459, + 887, + 506 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "B.3 PE: Spatial Alignment", + "text_level": 1, + "bbox": [ + 109, + 523, + 341, + 540 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "B.3.1 Training Details", + "text_level": 1, + "bbox": [ + 109, + 550, + 272, + 565 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Loss Functions. For self-aligning to frozen $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ layer 41 features ( $L_{\\mathrm{core}}$ ), we minimize cosine similarity:", + "bbox": [ + 109, + 577, + 875, + 593 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {c o r e}} = \\frac {1}{n _ {\\mathrm {t o k}}} \\sum \\left(\\frac {\\left(S _ {5 0}\\right) \\left(T _ {4 1}\\right) ^ {T}}{\\left\\| S _ {5 0} \\right\\| \\cdot \\left\\| T _ {4 1} \\right\\|}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 603, + 885, + 640 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where $S_{50}$ denotes the last layer features of the student, $T_{41}$ denotes frozen layer 41 features from $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ , and $n_{\\mathrm{tok}}$ represents the number of tokens. Note that we chose 41 fairly arbitrarily (it is layer 40 when written with indexing from 0). Judging by Fig. 8, any layer around 40 should work (and 39 may be slightly better).", + "bbox": [ + 109, + 646, + 887, + 694 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "For the encouraging locality loss $(L_{\\mathrm{loc}})$ , we compute the pairwise cosine similarity between a model's own tokens and itself. This forms a \"spatial correspondence map\" for what tokens should be considered similar. 
We then compute the same for the student, and minimize the difference between the two with MSE loss:", + "bbox": [ + 109, + 700, + 887, + 746 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {l o c}} = \\frac {1}{n _ {\\text {t o k}} ^ {2}} \\sum \\left(\\frac {(S _ {5 0}) (S _ {5 0}) ^ {T}}{| | S _ {5 0} | | ^ {2}} - \\frac {(T _ {\\mathrm {S A M}}) (T _ {\\mathrm {S A M}}) ^ {T}}{| | T _ {\\mathrm {S A M}} | | ^ {2}}\\right) ^ {2} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 756, + 885, + 792 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where $T_{\\mathrm{SAM}}$ denotes the \"SAM Mask Logits\" constructed in §5.2. We also find using a temperature $(t)$ on the SAM teacher's pairwise cosine similarity term $(x)$ useful: $e^{t(x - 1)}$ . The full loss is $L_{\\mathrm{spatial}} = L_{\\mathrm{core}} + L_{\\mathrm{loc}}$ .", + "bbox": [ + 109, + 803, + 887, + 835 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hyperparameters. In Tab. 21 we show the training hyperparameters for spatial alignment, finetuned on top of the initial $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ checkpoint. Then in Tab. 22 and Tab. 23, we show the settings for the two teachers and losses. Note that when running the teachers, we run them on the exact same image as the student (same data", + "bbox": [ + 109, + 845, + 888, + 891 + ], + "page_idx": 26 + }, + { + "type": "page_footnote", + "text": "7We excluded multi-images samples.", + "bbox": [ + 129, + 898, + 354, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "aug and all). Additionally, because the SAM 2.1 teacher operates at a resolution of 1024, we upsample the image, generate the mask logits, and then downsample the result. 
Both teachers are frozen.", + "bbox": [ + 109, + 80, + 887, + 112 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/5225fa73ec00cf4cdb350eeeb68dc140d3367e44438e9b1899dd91f68f3034d2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate5e-4
batch size12,288
warm-up steps0
training steps24K
data quantity5.4B (PEcore PT Data)
samples seen300M
resolution448
mask ratio0.75
mask size2×2 tokens
droppath0.4
layerscale0.1
aspect jitter ar(0.75,1.33)
data augcolor jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "bbox": [ + 135, + 122, + 328, + 332 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/136f3d22e44c07b73f44f9a797639d42c616d674d5ecb888fb4f0d58cb59d6b2.jpg", + "table_caption": [ + "Table 21 Spatial Alignment." + ], + "table_footnote": [], + "table_body": "
configvalues
modelSAM 2.1-L
layermask logits
resolution1024 (interp→448)
lossEq. 3
loss weight1
temperature20
sample points32 × 32 (1024)
pred iou threshold0
stability score threshold0
mask threshold0
", + "bbox": [ + 393, + 122, + 604, + 255 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/224c02e75a06293b632476410d4685357e8faa8895d55dbfbc83851eee821798.jpg", + "table_caption": [ + "Table 22 SAM 2.1 Teacher." + ], + "table_footnote": [], + "table_body": "
configvalues
modelPEcoreG
layer41
resolution448
lossEq. 2
loss weight1
", + "bbox": [ + 684, + 122, + 849, + 195 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Table 23 PEcoreG Teacher.", + "bbox": [ + 684, + 199, + 849, + 212 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "B.3.2 Visualization Method", + "text_level": 1, + "bbox": [ + 109, + 375, + 312, + 387 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To visualize the features in Fig. 17 and Fig. 20, our goal is to map a 1536-dimensional space down to 3 dimensions to view how the model encodes each token in relation to each other. One naive approach would be to apply PCA with 3 dimensions across all token in the image. However, we find this alone can be misleading.", + "bbox": [ + 107, + 397, + 888, + 444 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Specifically, if the model has rich semantics, it should be the case that most of those 1536 features have some useful information in them. Some of that information could be spatially contiguous, some of it not. We want PCA to only select the spatially contiguous information, since we are trying to evaluate the spatial quality of the features. However, naively applying PCA will not necessarily do that, especially for models with information aggregated in \"global tokens\" (§5.1). Despite these tokens carrying important information, they are not spatially contiguous. Thus, if PCA dedicates a large portion of its 3 dimensions to global tokens, the features will look like their spatial quality is bad, despite the features containing good spatial information.", + "bbox": [ + 107, + 450, + 888, + 556 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "So, how do we select for only the spatially contiguous information to visualize? The answer is simple: by definition, the spatially contiguous information will be... spatially contiguous. 
To keep the spatially contiguous information while lowering the impact of the global tokens, we can simply apply a low pass filter to the features (specifically, a gaussian blur with kernel size 3 and a $\\sigma$ of 1). To retain the detail of the original features, we can average the two together. Thus, to visualize features, we use the 3D PCA of the of the following. $x$ denotes the model's output features, and $g(x)$ denotes gaussian blur.", + "bbox": [ + 107, + 563, + 887, + 655 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n0. 5 x + 0. 5 g (x, k = 3, \\sigma = 1) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 662, + 885, + 679 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We show the impact of this in Fig. 19. Blurring the features make them appear more detailed! In reality, that information was always there, just PCA did not show it. Thus, great care must be taken when visualizing high dimensional feature spaces. If they were easy to map to 3 dimensions—you wouldn't need 1536 of them!", + "bbox": [ + 107, + 686, + 887, + 734 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg", + "image_caption": [ + "Figure 19 Feature Visualization Ablation. With raw features (top row), PCA misses spatially contiguous parts of the feature space and instead focuses on global tokens (which carry information but are not spatially coherent). By applying a simple low pass filter (bottom row), we can reveal spatial information that PCA originally missed (see column 2: with raw features, the background looks like a mess, with the low pass filter the tiles become visible)." 
+ ], + "image_footnote": [], + "bbox": [ + 305, + 752, + 689, + 849 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Then, to map the PCA dimensions to RBG pixel values, we map each PCA component to a corresponding channel in LCh color space, then convert those LCh colors to RGB to get the final image. Note that we use LCh instead of RGB directly for aesthetic reasons, and also because LCh is a cylindrical color space—where smooth changes to the values look like smooth changes in colors to humans—and thus is easier to discern.", + "bbox": [ + 109, + 80, + 887, + 141 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "B.3.3 Frozen Feature Dense Prediction", + "text_level": 1, + "bbox": [ + 109, + 157, + 392, + 172 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We discuss the detailed settings of the results for dense prediction with frozen features in Tab. 13. Each model is evaluated with its native resolution up to 448 or 448 (whichever is optimal).", + "bbox": [ + 109, + 181, + 885, + 212 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Zero-Shot Tracking. We evaluate our pretrained models on label propagation task using the protocols in [52, 107] on DAVIS dataset [104]. This evaluation does not require any finetuning or probing, therefore preserves the spatial features in the model. Following Toto [107], we use the features from the last $n = 7$ frames to find the nearest neighbor patch in the current frame, and then propagate the masks from the previous frames to the current frame. Note that this evaluation method does not require any training.", + "bbox": [ + 109, + 223, + 887, + 299 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Semantic Segmentation. For semantic segmentation, we evaluate our pretrained models on ADE20K [167] semantic segmentation task. 
We use a linear layer and convolutional layer to map intermediate spatial features to segmentation masks following [98]. The models are evaluated and then features are resized to $518 \\times 518$ . We only use features from single layer. The probing layers are finetuned with AdamW [83] with a learning rate of 0.001.", + "bbox": [ + 109, + 310, + 887, + 386 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Depth Estimation. For depth estimation on NYUv2 [123], we follow [75, 98]. We use a DPT-head [109] on top of our frozen pretrained model and use only single layer features. We scale the size of the DPT-head for each models based on the hidden size for each architecture. Because NYU is a small dataset and the models we evaluate are large, we observe the results for most models are noisy and prone to overfitting. Thus, for fair comparison we train all models for 20 epochs and for all models take the lowest validation loss over all epochs.", + "bbox": [ + 109, + 398, + 887, + 474 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Frozen Detection. For the frozen feature detection results presented in §3, we evaluated using Mask R-CNN [43] as a probe. We used a resolution of 1024 for Fig. 8 and 768 for the remaining experiments in §3. Because the backbones were frozen, we did not add any global attention and instead simply tiled the input image with a window size of 32 for the 1024px experiments and 24 for the 768px experiments. All models were interpolated to patch 16. 
Finally, the backbones were frozen and only the FPN and R-CNN heads trained for 15 epochs on COCO with a stepwise decay LR without drop path.", + "bbox": [ + 109, + 484, + 887, + 578 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "B.3.4 End-to-End Finetuning Detection and Segmentation", + "text_level": 1, + "bbox": [ + 109, + 594, + 524, + 608 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We provide a detailed discussion of settings of end-to-end finetuning on detection and segmentation presented in Tab. 14. The hyperparameters can be found in Tab. 24. We find that the default 100-epoch protocol in ViTDet [72, 149] causes overfitting problems in COCO experiments especially for billion-level parameter vision encoders, so we tune the training epochs, learning rate, drop path and learning rate decay accordingly.", + "bbox": [ + 109, + 616, + 887, + 678 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "The LVIS experiment setting is the same as COCO except all L-size models use learning rate of 2e-4 and all g-size and G-size models use 75 epochs.", + "bbox": [ + 109, + 684, + 887, + 715 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/3b8467cf873fe448328bb00c09bd6f8eaa56dfe7a5132e65f77156c552df6aff.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvaluesmodellrepochsdrop pathlr decaylayersglobal window indexwindow size
optimizerAdamWOpenAI CLIP-L1e-41000.40.824(5, 11, 17, 23)14
optimizer momentum(0.9, 0.999)MetaCLIP-L1e-41000.40.824(5, 11, 17, 23)14
weight decay0.1MetaCLIP-G5e-5750.50.948(11, 23, 35, 47)14
learning rateSigLIP-so1e-41000.40.827(2, 10, 18, 26)14
learning rate scheduleStep-wise decayEVA02-L1e-41000.40.824(5, 11, 17, 23)14
learning rate decayMAE-L1e-41000.40.824(5, 11, 17, 23)14
batch size64SigLIP2-so1e-41000.40.827(2, 10, 18, 26)14
image size1024 × 1024SigLIP2-g5e-5750.50.940(9, 19, 29, 39)14
augmentationLSJ [0.1, 2.0]DINOv2-L1e-41000.40.824(5, 11, 17, 23)32
epochsDINOv2-g5e-5360.50.940(9, 19, 29, 39)32
drop pathPEcoreG5e-5750.50.950(12, 24, 36, 49)32
positional embeddingabswin [7]PEspatialG5e-5360.50.950(12, 24, 36, 49)32
patch size16
window size
global window index
", + "bbox": [ + 151, + 726, + 844, + 887 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Table 24 Settings for End-to-End Finetuning Detection and Segmentation.", + "bbox": [ + 264, + 892, + 732, + 905 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "B.3.5 System-Level Comparison on Detection", + "text_level": 1, + "bbox": [ + 109, + 80, + 439, + 95 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We describe our implementation for system-level comparison to the state-of-the-arts on COCO object detection in Tab 15. Our implementation is based on the DETA repository8. We replace the vision encoder with our $\\mathrm{PE}_{\\mathrm{spatial}}$ and maintain the same hyperparameters as in the end-to-end finetuning settings, while keeping the detector unchanged. The training process consists of three stages:", + "bbox": [ + 109, + 104, + 588, + 195 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/6165b8d321ec714c5d44d432ac6923a8fa593d185ccc1fa1cd8b55f45852f7e8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Test-Time AugAPbox
No TTA65.2
+ More Queries65.3
+ SoftNMS [6]65.8
+ Flip Aug65.8
+ Multiscale Aug66.0
", + "bbox": [ + 651, + 83, + 846, + 148 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initial Training: Train on Objects365 for 12 epochs with an image resolution of $1024 \\times 1024$ , a total batch size of 256, and a learning rate of 2e-4, which is divided by 10 at the 10th epoch.", + "2. Increasing Resolution: Continue training on Objects365 for 6 epochs with a resolution of $1536 \\times 1536$ , a total batch size of 128, and a learning rate of 5e-5, which is divided by 10 at the 5th epoch.", + "3. Finetuning: Finetune on COCO dataset for 12 epochs with an image resolution of $1728 \\times 1728$ , a total batch size of 64, and a learning rate of 5e-5, which is divided by 10 at the 8th epoch.", + "4. Further Increasing Resolution: Further finetune on COCO dataset for 3 epochs with a resolution of $1824 \\times 1824$ , a total batch size of 64. To save GPU memory, we use SGD optimizer instead of Adam, with a learning rate of 5e-3, which is divided by 10 at the 2th epoch." + ], + "bbox": [ + 129, + 202, + 883, + 359 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We apply a series of test-time augmentation techniques to further improve the performance, detailed in Tab. 25.", + "bbox": [ + 109, + 368, + 883, + 397 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "C Additional Results", + "text_level": 1, + "bbox": [ + 109, + 422, + 331, + 438 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "C.1 PEcore: Robust Image Pretraining", + "text_level": 1, + "bbox": [ + 109, + 455, + 426, + 473 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In Tab. 26, we present the raw data for the robustness metrics in Fig. 2. Across the board, each change improved almost all metrics (with the exception of progressive resolution slightly hurting the average and mask regularization slightly hurting ImageNet Adversarial). 
The fact that there were no tradeoffs to these changes, indicate that their improvements to the features are general. This could be why most of these changes improved performance for downstream tasks as well.", + "bbox": [ + 109, + 479, + 885, + 556 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Note that in §2.1, we only discuss changes that we know to work. There are several changes that we have tried that do not work (i.e., do not improve performance or lower performance). For instance: average pooling instead of using a class token, increasing the text tower size, using hue or contrast jitter, and maintaining the same resolution throughout training but dropping tokens instead of progressive resolution (FLIP-style).", + "bbox": [ + 109, + 564, + 883, + 625 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We also find increasing batch size and increasing training iterations for an L scale model to have equivalent effects. This is in contrast to the batch size scaling observed by [160], but it is possible that this difference is down to a hyperparameter issue.", + "bbox": [ + 109, + 631, + 883, + 678 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/ddb05a6977bad6a63462785d89e782e38ab42d14be63d629f21a1828f6517a27.jpg", + "table_caption": [ + "Table 25 Test-Time Aug for system-level comparison on COCO in Tab. 15." + ], + "table_footnote": [], + "table_body": "
StepZero-Shot Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet IN Classes [4]ImageNet Adversarial [47]ImageNet Renditions [46]ImageNet Sketch [143]
1Baseline75.378.971.973.768.391.167.8
2Progressive Resolution75.178.971.872.469.990.567.0
3High Batch Size76.279.572.874.171.891.068.1
4LAMB and High LR76.979.973.374.373.591.568.6
5High Resolution (336)78.380.473.875.679.292.068.8
62D RoPE79.280.774.177.480.992.769.4
7Attention Pooling80.181.074.878.482.993.469.9
8Data Augmentation80.881.175.280.883.193.571.2
9Mask Regularization80.981.375.380.982.893.871.2
", + "bbox": [ + 310, + 689, + 692, + 845 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Table 26 Robust Image Pretraining Full Results. Raw results for the robustness metrics metrics in Fig. 2. Almost every change improves every metric, but some metrics are improved more than others (e.g., ObjectNet and ImageNet-A).", + "bbox": [ + 109, + 848, + 883, + 876 + ], + "page_idx": 29 + }, + { + "type": "page_footnote", + "text": "8https://github.com/jozhang97/DETA", + "bbox": [ + 129, + 893, + 370, + 907 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 936, + 508, + 948 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "C.2 $\\mathsf{PE}_{\\mathrm{core}}$ : Video Data Scaling", + "text_level": 1, + "bbox": [ + 109, + 79, + 374, + 97 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/389c8fd6f1342ba0acefdf83153292853134b64b77cc3dafeca0867b5135efc2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Video Data SizeAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet val [26]ImageNet v2 [112]ObjectNet IN Classes [4]ImageNet Adversarial [47]MS-COCO ts→img [76]MS-COCO img→ts [76]MS-COCO ts→img [76]Average VideoKinetics 400 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 51 [62]MSR-VTT ts→vid [153]MSR-VTT vid→ts [153]
0M77.083.978.686.690.352.170.357.070.369.461.678.547.440.531.4
3M77.784.178.886.690.953.374.261.672.472.264.288.553.842.837.6
6M78.084.279.086.791.154.072.763.673.573.466.088.954.644.943.6
8M78.484.279.287.091.654.973.664.874.574.567.789.555.346.945.5
11M78.684.279.287.291.855.473.865.275.175.067.689.755.647.745.8
14M78.884.279.287.591.955.774.365.575.475.367.989.955.847.846.3
17M78.984.279.287.792.055.874.365.875.775.568.290.256.048.346.7
", + "bbox": [ + 261, + 109, + 733, + 239 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The detailed video data scaling results are presented in Tab. 27. Our experiments demonstrate that increasing the number of synthetic video data generated by the proposed video data engine enhances the performance of classification and retrieval on both image and video benchmarks. On image benchmarks, while improvements on ImageNet val and v2 plateaued earlier compared to ObjectNet and ImageNet Adversarial, MS-COCO retrieval performance continued to show gains. On video benchmarks, scaling synthetic video data consistently yields better performance for both classification and retrieval tasks. We expect that further scaling up the video data with our video data engine will continue to drive performance improvements.", + "bbox": [ + 109, + 285, + 885, + 391 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "C.3 $\\mathsf{PE}_{\\mathrm{core}}$ : Smaller Models", + "text_level": 1, + "bbox": [ + 109, + 409, + 346, + 426 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/3cde832c80ead650dd1257b3e36e558af544c1df53f380fb5c7963a9230eccf0.jpg", + "table_caption": [ + "Table 27 Scaling Video Data. Increasing the number of synthetic video data generated by our proposed video data engine consistently enhances the performance of image and video classification and retrieval tasks." + ], + "table_footnote": [], + "table_body": "
ModelTeacher's TempModel ScaleZero-Shot Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet IN Classes [4]ImageNet Adversarial [47]ImageNet Renditions [46]ImageNet Sketch [143]
vanilla pretrained model-B66.274.267.462.550.283.059.8
distillation×2B65.271.865.561.450.283.658.6
×1B68.074.968.164.754.185.361.1
×0.7B68.275.168.265.354.485.161.3
×0.5B68.375.268.265.354.285.261.4
", + "bbox": [ + 282, + 441, + 720, + 556 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Table 28 Ablation Study on Teacher's Distribution Temperature. We evaluate the effect of varying temperatures on the teacher's distribution, using a pretrained vanilla CLIP model (ViT-B/14, resolution 224) as a baseline (details in §2.1). The models are finetuned via distillation with a short schedule of 50K steps.", + "bbox": [ + 109, + 560, + 885, + 602 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Ablation: Distillation Temperature. To optimize the performance of smaller models (B and L-scales in Tab. 4), we utilize a distillation finetuning approach with $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ as the teacher model. During this process, both student and teacher models encode image and text inputs to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence loss on both image-to-text and text-to-image similarity distributions.", + "bbox": [ + 109, + 619, + 885, + 696 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "We find that using a fixed and smaller temperature (i.e., higher logit scale), which controls the range of logits in the softmax, significantly enhances the effectiveness of distillation. This results in a sharper distribution for the teacher's distributions. In contrast, the student's temperature remains learnable, consistent with our pretraining procedure and CLIP training.", + "bbox": [ + 109, + 702, + 885, + 763 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In Tab. 28, we present an ablation study examining the impact of temperature on the teacher's distribution. For this analysis, we utilize a pretrained vanilla CLIP model (ViT-B/14, resolution 224), which serves as a baseline for comparison (see §2.1 for details). 
The models are finetuned using distillation with a concise schedule of 50K steps. Notably, our results show that employing a smaller temperature for the teacher's distributions yields improved performance on zero-shot ImageNet benchmarks.", + "bbox": [ + 109, + 771, + 887, + 847 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Building strong smaller models. In Tab. 29, we demonstrate our step-by-step training strategy for building strong smaller models at the L scale, as discussed in §2.4. Specifically, we outline our approach to image pretraining, image distillation, and video finetuning, and distillation. Leveraging the robust foundation established by our", + "bbox": [ + 109, + 858, + 887, + 905 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 936, + 506, + 949 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/abc1114a11768f47e364fe16d4aef24261b196f0d49fc64674d66412edf9825b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelStageImage Zero-ShotVideo Zero-Shot
Average ImageImageNet val [26]ImageNet v2 [112]ObjectNet IN Classes [4]ImageNet Adversarial [47]MS-COCO ts→img [76]MS-COCO img→ts [76]Average VideoKinetics 400 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 51 [62]MSR-VTT ts→vid [153]MSR-VTT vid→ts [153]
SigLIP2-L/16 [138]-76.083.177.484.484.355.371.456.265.362.556.886.749.341.531.4
PEcoreLimage pretraining75.182.976.881.885.653.070.459.068.067.758.585.557.742.033.4
PEcoreL+image distillation from PEcoreG77.683.678.184.488.956.074.764.573.072.664.886.558.047.948.4
PEcoreL+video finetuning78.083.577.984.789.057.175.965.373.472.765.387.158.550.350.1
", + "bbox": [ + 148, + 78, + 846, + 181 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "pretraining techniques (§2.1), we show that distilling from $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ , our strongest unified perception encoder, yields improvements on both image and video benchmarks. Furthermore, a short-scheduled video finetuning provides an additional boost in performance on both benchmarks.", + "bbox": [ + 109, + 253, + 885, + 299 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "C.4 $\\mathsf{PE}_{\\mathrm{lang}}$ : Additional Results", + "text_level": 1, + "bbox": [ + 109, + 316, + 370, + 335 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Analogous to Tab. 10, in Tab. 30, we compare $\\mathrm{PE}_{\\mathrm{core}}$ and $\\mathrm{PE}_{\\mathrm{lang}}$ with dynamic resolution setting [77, 82]. More specifically, we use up to 4 tiles, following after a thumbnail, which is a whole image resized into $448 \\times 448$ . With the maximum number of tiles of 4, the model can cover $\\{1 \\times 1, 1 \\times 2, 1 \\times 3, 1 \\times 4, 2 \\times 1, 2 \\times 2, 3 \\times 1, 4 \\times 1\\}$ tile ratios. Similar to the Tab. 10, 11, 12 in the main paper, we show that $\\mathrm{PE}_{\\mathrm{lang}}$ largely outperforms the baseline vision encoders by large margins across all categories of MLLM tasks. Note that $\\mathrm{PE}_{\\mathrm{lang}}$ has been alignment-tuned with native resolution input, as opposed to e.g., InternViT 2.5, which has been midtrained with dynamic tiling, which shows $\\mathrm{PE}_{\\mathrm{lang}}$ 's strong generality for different input formats.", + "bbox": [ + 109, + 340, + 887, + 449 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Next, in Tab. 31, 32, 33, we show the breakdowns of RefCOCO/+/g [56] with Llama 3.1-instruct 8B as language model, Qwen2.5 LM 7B as language model, and with Llama 3.1-instruct 8B and dynamic tiling $(4 + 1)$ , respectively. 
In our SFT data, we have VisualGenome [60], DCI [139], and Flickr30K [103] as grounding datasets, and RefCOCO/+/g are unseen. We therefore report zero-shot performance of the MLLMs to evaluate spatial understanding capability of the vision encoders. Overall, $\\mathrm{PE}_{\\mathrm{lang}}$ L or G show the best performance across all RefCOCO splits, except with Qwen2.5 LM. This is because (1) InternViT 2.5 6B is midtrained with Qwen2 LM, and (2) during pre/mid-training the training data of RefCOCO/+/g are seen.", + "bbox": [ + 109, + 454, + 887, + 561 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/b2f874bec500ec2a5eb8600f35c03af98da22cff32721a4b4f68cdca969810fc.jpg", + "table_caption": [ + "Table 29 Building Strong Smaller Models. This table illustrates the step-by-step process of developing the $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}336\\mathrm{px}$ model, as outlined in §2.4. Starting with the pretrained $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}$ , both image distillation, along with video finetuning, enhance performance across image and video benchmarks, resulting in a unified L-scale model." + ], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolution Patch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QAAvg. VQAAvg. Cap.Flickr CIDEr [157]COCO CIDEr [76]NoCaps CIDEr [1]Avg. Ground. RefCOCOg+ [58]Avg. VideoVideoMME Acc [38]STAR Acc [148]TGIF-QA Acc [53]EgoSchema Acc [89]MVBench Acc [68]PerceptionTest Acc [105]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1461.871.162.540.273.374.665.364.988.579.8113.490.4133.5116.267.148.044.847.162.739.046.048.3
MetaCLIP-G [152]1.8B224/1460.368.161.339.172.874.965.465.988.280.1114.291.8134.4116.566.049.046.546.562.545.044.748.9
PElang G†1.7B*224/1470.279.879.147.574.676.070.664.388.380.6116.392.0136.4120.569.556.649.055.969.961.250.053.6
576 Tokens per Tile
CLIP [106]0.3B336/1469.676.878.250.372.976.371.864.988.080.4114.090.9134.4116.668.550.846.652.265.044.646.349.9
AIMv2-L [37]0.3B336/1466.774.174.945.272.477.473.565.689.081.7116.492.5137.1119.566.654.143.454.370.656.047.352.7
SigLIP2-so [138]0.4B384/1655.561.454.933.372.376.570.166.088.681.2118.095.8138.3119.866.554.344.952.866.858.649.653.3
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1477.582.188.561.877.479.780.266.489.882.5120.397.4140.2123.271.959.849.462.774.164.053.155.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1656.966.056.534.370.976.469.966.288.481.2117.894.7137.8120.967.846.247.044.966.739.234.545.1
PEcoreL0.3B448/1467.172.478.346.471.276.474.063.788.879.0113.991.5134.5115.762.951.447.051.262.749.647.850.1
PElang L0.3B448/1478.382.889.365.275.978.578.864.489.681.3117.894.7138.1120.771.656.547.057.268.059.852.354.7
AIMv2 3B [37]2.7B448/1467.573.078.246.572.278.879.266.288.381.7119.095.8139.7121.565.154.049.655.467.349.649.952.5
InternViT2.5 6B [18]5.5B448/1467.474.674.347.672.975.971.364.887.779.7110.485.3132.5113.556.852.046.049.665.050.649.651.3
PEcoreG1.9B448/1468.073.481.247.669.776.474.362.589.179.6113.091.6134.5112.967.653.246.054.367.051.248.752.0
PElang G1.7B*448/1478.681.889.867.875.080.382.366.789.682.8119.695.2140.3123.471.859.049.661.873.960.052.656.3
", + "bbox": [ + 111, + 573, + 890, + 813 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Table 30 4+1 Tile Llama 8B MLLM Results. Llama 3.1-instruct 8B [82] is used as a language model. ${}^{*}\\mathrm{PE}_{\\mathrm{lang}}$ has 1.7B parameters since we discard the last 3 layers during language alignment. All MLLMs are trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of $448\\times 448$ (or the corresponding resolution for each encoder). The image tiles follow after a thumbnail input, similar to prior work [77]. Evaluation on an model that was interpolated without additional training (i.e., zero-shot resolution).", + "bbox": [ + 109, + 816, + 887, + 886 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 936, + 508, + 948 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/c4c7c849b0dc9295c39690b836481b60a614a3ea89eddad3e9fbbbcb72ed2aaa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolution Path SizeAvg. Ground.
RefCOCO val/ [56]RefCOCO testA [56]RefCOCO val/ [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1460.663.656.767.554.158.948.867.267.8
MetaCLIP-G [152]1.8B224/1460.562.056.567.853.558.749.268.268.3
PEiang G†1.7B*224/1465.767.764.470.958.362.056.673.274.4
576 Tokens per Image
CLIP [106]0.3B336/1465.066.761.471.657.662.554.573.272.8
AIMv2-L [37]0.3B336/1463.365.461.669.655.060.052.071.171.5
AIMv2-L Dist. [37]0.3B336/1462.664.861.069.454.459.051.370.870.0
SigLIP2-so [138]0.4B384/1667.468.866.571.060.361.858.576.276.0
SigLIP2-g-opt [138]1.1B384/1666.567.966.170.158.861.757.175.575.0
PEiang G†1.7B*336/1468.969.867.573.261.564.060.877.377.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1466.969.366.772.658.363.157.274.274.0
SigLIP2-so [138]0.4B512/1669.671.469.274.461.364.860.377.977.2
PEcore L0.3B448/1459.761.755.366.953.158.848.068.567.5
PEiang L0.3B448/1470.571.870.273.063.766.162.778.878.9
DINOv2 [98]1.1B448/1464.967.262.570.557.061.054.573.173.1
AIMv2 3B [37]2.7B448/1436.137.634.140.732.736.232.036.938.6
InternViT2.5 6B [18]5.5B448/1468.070.267.672.260.664.058.775.375.2
PEcore G1.9B448/1466.668.364.472.358.762.756.075.175.0
PEiang G1.7B*448/1471.371.969.975.164.267.363.079.479.2
", + "bbox": [ + 295, + 79, + 707, + 349 + ], + "page_idx": 32 + }, + { + "type": "table", + "img_path": "images/d146e5ba36590a72990779f2d2fff0d2f01b0733d571b364a0116dbed224b453.jpg", + "table_caption": [ + "Table 31 Llama MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used for zeroshot RefCOCO/+/g grounding." + ], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolutionPatch SizeAvg. Ground.
RefCOCO var[56]RefCOCO texA[56]RefCOCO var[56]RefCOCO+ texA[56]RefCOCO+ var[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1670.073.673.074.360.962.759.978.477.2
SigLIP2-g-opt [138]1.1B384/1669.973.372.473.660.562.360.778.478.2
PEiangG†1.7B*336/1470.173.472.075.362.064.261.278.477.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1468.172.469.174.159.362.456.675.275.5
SigLIP2-so [138]0.4B512/1670.574.173.774.461.762.961.078.677.9
PEcoreL0.3B448/1466.570.467.871.557.761.156.275.875.3
PEiangL0.3B448/1470.474.472.674.662.264.062.079.078.7
DINOv2 [98]1.1B448/1469.373.471.173.960.063.959.076.476.7
AIMv2 3B [37]2.7B448/1467.671.467.772.359.261.256.376.476.4
InternViT2.5 6B‡ [18]5.5B448/1472.877.776.577.163.666.062.280.079.5
PEcoreG1.9B448/1470.574.071.875.861.564.860.178.577.3
PEiangG1.7B*448/1472.175.472.976.364.265.962.979.779.7
", + "bbox": [ + 295, + 378, + 707, + 578 + ], + "page_idx": 32 + }, + { + "type": "table", + "img_path": "images/c316ddc2973a2c132e703a17948017da60b2c81884dbd22cf8abbd5cd3d8dd51.jpg", + "table_caption": [ + "Table 32 Qwen MLLM-Based Zereshot RefCOCO. QwenLM 2.5 7B [155] is used as a language model. All MLLMs report zereshot results on RefCOCO/+/g datasets. $\\ddagger$ Trained with RefCOCO/+/g beforehand." + ], + "table_footnote": [], + "table_body": "
ModelEncoder ParamsResolutionAvg. Ground.Grounding
RefCOCORefCOCORefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+
val [56]val [56]val [56]val [56]val [56]val [56]val [56]val [56]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1467.169.365.073.260.564.956.574.373.4
MetaCLIP-G [152]1.8B224/1466.067.963.271.959.262.955.873.873.1
PElang G†1.7B*224/1470.371.669.673.763.366.262.678.678.2
576 Tokens per Tile
CLIP [106]0.3B336/1468.570.766.674.161.165.958.176.075.1
AIMv2-L [37]0.3B336/1466.668.465.571.459.363.456.574.274.2
SigLIP2-so [138]0.4B384/1666.567.966.170.158.861.757.175.575.0
SigLIP2-g-opt [138]1.1B384/1666.568.265.670.159.062.358.074.874.0
PElang G†1.7B*336/1471.973.671.574.964.867.363.980.480.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1667.869.267.871.259.962.559.076.976.0
PEcoreL0.3B448/1462.965.359.969.256.662.252.070.170.0
PElang L0.3B448/1471.673.070.874.365.267.262.979.779.7
AIMv2 3B [37]2.7B448/1465.166.962.971.158.162.455.671.872.2
InternViT2.5 B‡ [18]5.5B448/1456.861.056.465.851.057.046.158.058.9
PEcoreG1.9B448/1467.669.265.872.459.964.158.375.175.6
PElang G1.7B*448/1471.872.670.774.664.866.664.680.480.3
", + "bbox": [ + 295, + 621, + 707, + 862 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Table 33 4+1 Tile Llama 8B MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used as a language model. All trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of the encoder's native resolution, with a thumbnail image in front, similar to prior work [77]. ${}^{ \\ddagger }$ Trained with RefCOCO/+/g beforehand.", + "bbox": [ + 109, + 864, + 883, + 907 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "C.5 PEspatial: Additional Qualitative Results", + "text_level": 1, + "bbox": [ + 111, + 79, + 477, + 97 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg", + "image_caption": [ + "Figure 20 More Visualizations of the feature space following Fig. 17. After the image itself, column 1 is $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ last layer features, column 2 is $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ aligned to its own layer 41, column 3 is $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ aligned to SAM 2.1-L [111] mask logits, and column 4 is $\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}$ aligned to both, denoted $\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}$ . See §B.3.2 for visualization method." 
+ ], + "image_footnote": [], + "bbox": [ + 112, + 109, + 495, + 758 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 111, + 883, + 756 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 80, + 227, + 97 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In ICCV, 2019. 14, 15, 16, 32", + "[2] Pravesh Agrawal, Szymon Antoniak, Emma Bou Hanna, Baptiste Bout, Devendra Chaplot, Jessica Chudnovsky, Diogo Costa, Baudouin De Monicault, Saurabh Garg, Theophile Gervet, Soham Ghosh, Amélie Héliou, Paul Jacob, Albert Q. Jiang, Kartik Khandelwal, Timothee Lacroix, Guillaume Lample, Diego Las Casas, Thibaut Lavril, Teven Le Scao, Andy Lo, William Marshall, Louis Martin, Arthur Mensch, Pavankumar Muddireddy, Valera Nemychnikova, Marie Pellat, Patrick Von Platen, Nikhil Raghuraman, Baptiste Rozière, Alexandre Sablayrolles, Lucile Saulnier, Romain Sauvestre, Wendy Shang, Roman Soletskyi, Lawrence Stewart, Pierre Stock, Joachim Studnia, Sandeep Subramanian, Sagar Vaze, Thomas Wang, and Sophia Yang. Pixtral 12b. arXiv:2410.07073, 2024. 20", + "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv:2308.12966, 2023. 20", + "[4] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. 
ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. In NeurIPS, 2019. 3, 4, 6, 8, 9, 10, 30, 31, 32", + "[5] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, Thomas Unterthiner, Daniel Keysers, Skanda Koppula, Fangyu Liu, Adam Grycner, Alexey A. Gritsenko, Neil Houlsby, Manoj Kumar, Keran Rong, Julian Eisenschlos, Rishabh Kabra, Matthias Bauer, Matko Bosnjak, Xi Chen, Matthias Minderer, Paul Voigtlaender, Ioana Bica, Ivana Balazevic, Joan Puigcerver, Pinelopi Papalampidi, Olivier J. Henaff, Xi Xiong, Radu Soricut, Jeremiah Harmsen, and Xiaohua Zhai. PaliGemma: A versatile 3b VLM for transfer. arXiv:2407.07726, 2024. 20", + "[6] Navaneeth Bodla, Bharat Singh, Rama Chellappa, and Larry S Davis. Soft-NMS-Improving object detection with one line of code. In ICCV, 2017. 30", + "[7] Daniel Bolya, Chaitanya Ryali, Judy Hoffman, and Christoph Feichtenhofer. Window attention is bugged: how not to interpolate position embeddings. In *ICLR*, 2023. 11, 29", + "[8] Florian Bordes, Randall Balestriero, Quentin Garrido, Adrien Bardes, and Pascal Vincent. Guillotine regularization: Why removing layers is needed to improve generalization in self-supervised learning. arXiv:2206.13378, 2022. 20", + "[9] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - Mining discriminative components with random forests. In ECCV, 2014. 9", + "[10] Gary Bradski. The OpenCV library. Dr. Dobb's Journal: Software Tools for the Professional Programmer, 2000. 22", + "[11] Zhaowei Cai and Nuno Vasconcelos. Cascade R-CNN: Delving into high quality object detection. In CVPR, 2018. 19", + "[12] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020. 
19", + "[13] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D. Manning. AuroraCap: Efficient, performant video detailed captioning and a new benchmark. In ICLR, 2025. 5", + "[14] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. Hybrid task cascade for instance segmentation. In CVPR, 2019. 19", + "[15] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pretraining from pixels. In ICML, 2020. 20", + "[16] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020. 20" + ], + "bbox": [ + 120, + 113, + 887, + 891 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, Alexander Kolesnikov, Joan Puigcerver, Nan Ding, Keran Rong, Hassan Akbari, Gaurav Mishra, Linting Xue, Ashish Thapliyal, James Bradbury, Weicheng Kuo, Mojtaba Seyedhosseini, Chao Jia, Burcu Karagol Ayan, Carlos Riquelme, Andreas Steiner, Anelia Angelova, Xiaohua Zhai, Neil Houlsby, and Radu Soricut. Pali: A jointly-scaled multilingual language-image model. In ICLR, 2023. 
8, 9", + "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, Lixin Gu, Xuehui Wang, Qingyun Li, Yimin Ren, Zixuan Chen, Jiapeng Luo, Jiahao Wang, Tan Jiang, Bo Wang, Conghui He, Botian Shi, Xingcheng Zhang, Han Lv, Yi Wang, Wenqi Shao, Pei Chu, Zhongying Tu, Tong He, Zhiyong Wu, Huipeng Deng, Jiaye Ge, Kai Chen, Kaipeng Zhang, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv:2412.05271, 2024. 11, 15, 16, 20, 32, 33", + "[19] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyuan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024. 1, 6, 7, 9, 10, 20, 26", + "[20] Gong Cheng, Junwei Han, and Xiaoqiang Lu. Remote sensing image scene classification: Benchmark and state of the art. Proceedings of the IEEE, 2017. 9", + "[21] Jang Hyun Cho, Andrea Madotto, Effrosyni Mavroudi, Triantafyllos Afouras, Tushar Nagarajan, Muhammad Maaz, Yale Song, Tengyu Ma, Shuming Hu, Hanoona Rasheed, Peize Sun, Po-Yao Huang, Daniel Bolya, Suyog Jain, Miguel Martin, Huiyu Wang, Nikhila Ravi, Shashank Jain, Temmy Stark, Shane Moon, Babak Damavandi, Vivian Lee, Andrew Westbury, Salman Khan, Philipp Krahenbuhl, Piotr Dólar, Lorenzo Torresani, Kristen Grauman, and Christoph Feichtenhofer. Perceptionlm: Open-access data and models for detailed visual understanding. arXiv:2504.13180, 2025. 2, 5, 11, 14, 15, 16, 21", + "[22] Seokju Cho, Heeseong Shin, Sunghwan Hong, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. CAT-Seg: Cost aggregation for open-vocabulary semantic segmentation. In CVPR, 2024. 20", + "[23] Timothee Darcet, Maxime Oquab, Julien Mairal, and Piotr Bojanowski. 
Vision transformers need registers. In ICLR, 2024. 12, 17", + "[24] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, Rodolphe Jenatton, Lucas Beyer, Michael Tschannen, Anurag Arnab, Xiao Wang, Carlos Riquelme, Matthias Minderer, Joan Puigcerver, Utku Evci, Manoj Kumar, Sjoerd van Steenkiste, Gamaleldin F. Elsayed, Aravindh Mahendran, Fisher Yu, Avital Oliver, Fantine Huot, Jasmijn Bastings, Mark Patrick Collier, Alexey Gritsenko, Vighnesh Birodkar, Cristina Vasconcelos, Yi Tay, Thomas Mensink, Alexander Kolesnikov, Filip Pavetic, Dustin Tran, Thomas Kipf, Mario Lučić, Xiaohua Zhai, Daniel Keysers, Jeremiah Harmsen, and Neil Houlsby. Scaling vision transformers to 22 billion parameters. In ICML, 2023. 1, 9", + "[25] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weihs, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv:2409.17146, 2024. 16", + "[26] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3, 6, 8, 9, 10, 30, 31, 32", + "[27] Karan Desai and Justin Johnson. VirTex: Learning visual representations from textual annotations. 
In CVPR, 2021. 20", + "[28] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 20", + "[29] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1, 8, 9" + ], + "bbox": [ + 117, + 80, + 885, + 905 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pre-training of large autoregressive image models. In ICML, 2024. 20", + "[31] David Fan, Shengbang Tong, Jiachen Zhu, Koustuv Sinha, Zhuang Liu, Xinlei Chen, Michael Rabbat, Nicolas Ballas, Yann LeCun, Amir Bar, and Saining Xie. Scaling language-free visual representation learning. arXiv:2504.01017, 2025. 12, 13", + "[32] Lijie Fan, Dilip Krishnan, Phillip Isola, Dina Katabi, and Yonglong Tian. Improving CLIP training with language rewrites. In NeurIPS, 2023. 20", + "[33] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. In ICLR, 2024. 1, 3, 9, 16, 20, 26", + "[34] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA: Exploring the limits of masked visual representation learning at scale. In CVPR, 2023. 1", + "[35] Yuxin Fang, Quan Sun, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA-02: A visual representation for neon genesis. Image and Vision Computing, 2024. 1, 19", + "[36] Christoph Feichtenhofer. X3D: Expanding architectures for efficient video recognition. 
In CVPR, 2020. 4", + "[37] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T. Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders. In CVPR, 2025. 1, 2, 10, 11, 15, 16, 19, 20, 32, 33", + "[38] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-MME: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv:2405.21075, 2024. 14, 15, 16, 32", + "[39] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, Eyal Orgad, Rahim Entezari, Giannis Daras, Sarah Pratt, Vivek Ramanujan, Yonatan Bitton, Kalyani Marathe, Stephen Mussmann, Richard Vencu, Mehdi Cherti, Ranjay Krishna, Pang Wei Koh, Olga Saukh, Alexander Ratner, Shuran Song, Hannaneh Hajishirzi, Ali Farhadi, Romain Beaumont, Sewoong Oh, Alex Dimakis, Jenia Jitsev, Yair Carmon, Vaishaal Shankar, and Ludwig Schmidt. DataComp: In search of the next generation of multimodal datasets. In NeurIPS, 2023. 10, 20", + "[40] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017. 14, 15, 16, 32", + "[41] Agrim Gupta, Piotr Dollar, and Ross Girshick. LVIS: A dataset for large vocabulary instance segmentation. In CVPR, 2019. 19", + "[42] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1", + "[43] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask R-CNN. 
In ICCV, 2017. 11, 12, 19, 29", + "[44] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022. 1, 19", + "[45] Greg Heinrich, Mike Ranzinger, Hongxu, Yin, Yao Lu, Jan Kautz, Andrew Tao, Bryan Catanzaro, and Pavlo Molchanov. RADIOv2.5: Improved baselines for agglomerative vision foundation models. In CVPR, 2025. 1, 10, 18", + "[46] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In ICCV, 2021. 3, 8, 9, 30, 31", + "[47] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In CVPR, 2021. 3, 4, 8, 9, 30, 31, 32", + "[48] Byeongho Heo, Song Park, Dongyoon Han, and Sangdoo Yun. Rotary position embedding for vision transformer. In ECCV, 2024. 20" + ], + "bbox": [ + 119, + 80, + 885, + 882 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[49] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. In NeurIPS Deep Learning Workshop, 2015. 8", + "[50] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, 2016. 14, 17", + "[51] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. OpenCLIP, 2021. 3, 20", + "[52] Allan Jabri, Andrew Owens, and Alexei Efros. Space-time correspondence as a contrastive random walk. In NeurIPS, 2020. 
11, 19, 29", + "[53] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. TGIF-QA: Toward spatio-temporal reasoning in visual question answering. In CVPR, 2017. 14, 15, 16, 32", + "[54] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 20", + "[55] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset. arXiv:1705.06950, 2017. 6, 9, 31, 32", + "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 14, 15, 16, 32, 33", + "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 14, 15, 16, 32", + "[58] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dolkar, and Ross Girshick. Segment anything. In ICCV, 2023. 5, 18", + "[59] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV Workshop, 2013. 9", + "[60] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 2017. 27, 32", + "[61] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012. 1", + "[62] Hildegard Kuehne, Hueihan Jhuang, Estfbaliz Garrote, Tomaso Poggio, and Thomas Serre. 
HMDB: a large video database for human motion recognition. In ICCV, 2011. 9, 31, 32", + "[63] Weicheng Kuo, Yin Cui, Xiuye Gu, A. J. Piergiovanni, and Anelia Angelova. F-VLM: open-vocabulary object detection upon frozen vision and language models. In ICLR, 2023. 20", + "[64] Zhengfeng Lai, Haotian Zhang, Bowen Zhang, Wentao Wu, Haoping Bai, Aleksei Timofeev, Xianzhi Du, Zhe Gan, Jiulong Shan, Chen-Nee Chuah, Yinfei Yang, and Meng Cao. VeCLIP: Improving CLIP training via visual-enriched captions. In ECCV, 2024. 5, 20", + "[65] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? In NeurIPS, 2024. 27", + "[66] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. LLaVA-OneVision: Easy visual task transfer. TMLR, 2025. 16, 20, 22", + "[67] Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In ICCV, 2023. 9", + "[68] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, Limin Wang, and Yu Qiao. MVBench: A comprehensive multi-modal video understanding benchmark. In CVPR, 2024. 14, 15, 16, 32" + ], + "bbox": [ + 119, + 80, + 885, + 875 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 1", + "[70] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for CLIP training. In NeurIPS, 2023. 3", + "[71] Xianhang Li, Zeyu Wang, and Cihang Xie. 
CLIPA-v2: Scaling CLIP training with 81.1% zero-shot imagenet accuracy within a $10,000 budget; an extra $4,000 unlocks 81.8% accuracy. arXiv:2306.15658, 2023. 3, 20", + "[72] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. In ECCV, 2022. 11, 19, 29", + "[73] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, 2023. 14, 15, 16, 32", + "[74] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In CVPR, 2023. 20", + "[75] Zhenyu Li, Xuyang Wang, Xianming Liu, and Junjun Jiang. Binsformer: Revisiting adaptive bins for monocular depth estimation. TIP, 2024. 29", + "[76] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In ECCV, 2014. 2, 6, 9, 12, 14, 15, 16, 19, 27, 31, 32", + "[77] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning,OCR, and world knowledge, 2024. 32, 33", + "[78] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. NeurIPS, 2024. 20, 23", + "[79] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 3, 19", + "[80] Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, and Baining Guo. Swin transformer v2: Scaling up capacity and resolution. In CVPR, 2022. 19", + "[81] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In CVPR, 2022. 1", + "[82] AI @ Meta Llama Team. The llama 3 herd of models. arXiv:2407.21783, 2024. 
5, 14, 15, 16, 20, 32, 33", + "[83] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. *ICLR*, 2019. 3, 29", + "[84] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. CLIP4Clip: An empirical study of clip for end to end video clip retrieval. Neurocomputing, 2021. 6, 9", + "[85] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. SiT: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In ECCV, 2024. 20", + "[86] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-ChatGPT: Towards detailed video understanding via large vision and language models. In ACL, 2024. 5", + "[87] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. VideoGPT+: Integrating image and video encoders for enhanced video understanding. arXiv:2406.09418, 2024. 5", + "[88] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arxiv:1306.5151, 2013. 9", + "[89] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. NeurIPS, 2024. 14, 15, 16, 32", + "[90] Kevis-Kokitsi Maninis, Kaifeng Chen, Soham Ghosh, Arjun Karpur, Koert Chen, Ye Xia, Bingyi Cao, Daniel Salz, Guangxing Han, Jan Dlabal, et al. Tips: Text-image pretraining with spatial awareness. arXiv:2410.16512, 2024. 1", + "[91] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. DocVQA: A dataset for vqa on document images. In WACV, 2021. 14, 15, 16, 32", + "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographics. In WACV, 2022. 
14, 15, 16, 32" + ], + "bbox": [ + 117, + 80, + 885, + 906 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[93] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, Anton Belyi, Haotian Zhang, Karanjeet Singh, Doug Kang, Ankur Jain, Hongyu He, Max Schwarzer, Tom Gunter, Xiang Kong, Aonan Zhang, Jianyu Wang, Chong Wang, Nan Du, Tao Lei, Sam Wiseman, Guoli Yin, Mark Lee, Zirui Wang, Ruoming Pang, Peter Grasch, Alexander Toshev, and Yinfei Yang. MM1: methods, analysis and insights from multimodal LLM pre-training. In ECCV, 2024. 20", + "[94] Matthias Minderer, Alexey A. Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. Simple open-vocabulary object detection with vision transformers. In ECCV, 2022. 1, 20", + "[95] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In NeurIPS, 2023. 20", + "[96] Thao Nguyen, Samir Yitzhak Gadre, Gabriel Ilharco, Sewoong Oh, and Ludwig Schmidt. Improving multimodal datasets with image captioning. In NeurIPS, 2023. 5, 20", + "[97] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In ICVGIP, 2008. 9", + "[98] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Mido Assran, Nicolas Ballas, Wojciech Galuba, Russell Howes, Po-Yao Huang, Shang-Wen Li, Ishan Misra, Michael Rabbat, Vasu Sharma, Gabriel Synnaeve, Hu Xu, Hervé Jégou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. 
DINoV2: Learning robust visual features without supervision. TMLR, 2024. 1, 2, 10, 11, 15, 16, 18, 19, 20, 22, 29, 33", + "[99] Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, and Philipp Krahenbuhl. NMSstrikes back. arXiv:2212.06137, 2022. 19", + "[100] Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In CVPR, 2012. 9", + "[101] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. arXiv:2306.14824, 2023. 20", + "[102] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Kenji Kawaguchi, Hanxiao Liu, Adams Wei Yu, Jiahui Yu, Yi-Ting Chen, Minh-Thang Luong, Yonghui Wu, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. Neurocomputing, 2023. 1, 9, 20", + "[103] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV, 2015. 27, 32", + "[104] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 DAVIS challenge on video object segmentation. arXiv:1704.00675, 2017. 19, 29", + "[105] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adriâ Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, Carl Doersch, Tatiana Matejovicova, Yury Sulsky, Antoine Miech, Alex Frechette, Hanna Klimczak, Raphael Koster, Junlin Zhang, Stephanie Winkler, Yusuf Aytar, Simon Osindero, Dima Damen, Andrew Zisserman, and João Carreira. Perception test: A diagnostic benchmark for multimodal video models. In NeurIPS, 2024. 14, 15, 16, 32", + "[106] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 
Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 3, 8, 9, 15, 16, 19, 20, 31, 32, 33", + "[107] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pre-training from videos. arXiv:2501.05453, 2025. 19, 20, 29", + "[108] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv:2204.06125, 2022. 1", + "[109] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In ICCV, 2021, 11, 19, 29", + "[110] Mike Ranzinger, Greg Heinrich, Jan Kautz, and Pavlo Molchanov. AM-RADIO: Agglomerative vision foundation model—reduce all domains into one. In CVPR, 2024. 1, 18, 21" + ], + "bbox": [ + 112, + 80, + 885, + 888 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[111] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. SAM 2: Segment anything in images and videos. In ICLR, 2024. 2, 5, 17, 18, 34", + "[112] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet? In ICML, 2019. 3, 6, 8, 9, 30, 31, 32", + "[113] William A. Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. The dollar street dataset: images representing the geographic and socioeconomic diversity of the world. In NeurIPS Datasets and Benchmarks, 2022. 10", + "[114] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 
High-resolution image synthesis with latent diffusion models. In CVPR, 2022. 1", + "[115] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In ECCV, 2020. 20", + "[116] Mert Bulent Sariyildiz, Philippe Weinzaepfel, Thomas Lucas, Diane Larlus, and Yannis Kalantidis. UNIC: Universal classification models via multi-teacher distillation. In ECCV, 2024. 18", + "[117] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In NeurIPS Datasets and Benchmarks, 2022. 20", + "[118] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-OKVQA: A benchmark for visual question answering using world knowledge. In ECCV, 2022. 14, 15, 16, 32", + "[119] Jinghuan Shang, Karl Schmeckpeper, Brandon B May, Maria Vittoria Minniti, Tarik Kelestemur, David Watkins, and Laura Herlant. Theia: Distilling diverse vision foundation models for robot learning. In CoRL, 2024. 18", + "[120] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In ICCV, 2019. 19", + "[121] Shashank Shekhar, Florian Bordes, Pascal Vincent, and Ari Morcos. Objectives matter: Understanding the impact of self-supervised objectives on vision transformer representations. arXiv:2304.13089, 2023. 20", + "[122] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In ECCV, 2020. 10", + "[123] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 
19, 29", + "[124] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015. 1", + "[125] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards VQA models that can read. In CVPR, 2019. 14, 15, 16, 32", + "[126] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv:1212.0402, 2012. 9, 31, 32", + "[127] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. Neurocomputing, 2024. 4, 20, 25", + "[128] Lin Sun, Jiale Cao, Jin Xie, Xiaoheng Jiang, and Yanwei Pang. CLIPer: Hierarchically improving spatial representation of CLIP for open-vocabulary semantic segmentation. arXiv:2411.13836, 2024. 20", + "[129] Quan Sun, Yuxin Fang, Ledell Wu, Xinlong Wang, and Yue Cao. EVA-CLIP: Improved training techniques for clip at scale. arXiv:2303.15389, 2023. 20", + "[130] Quan Sun, Jinsheng Wang, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, and Xinlong Wang. EVA-CLIP-18B: Scaling clip to 18 billion parameters. arXiv:2402.04252, 2024. 1, 9, 10, 20, 26", + "[131] Mingxing Tan and Quoc Le. EfficientNet: Rethinking model scaling for convolutional neural networks. In ICML, 2019. 1, 3, 4", + "[132] Gemma Team. Gemma 3 technical report. arXiv:2503.19786, 2025. 16, 20" + ], + "bbox": [ + 109, + 80, + 883, + 905 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[133] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100M: The new data in multimedia research. Communications of the ACM, 2016. 
9", + "[134] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Ziteng Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024. 11, 20", + "[135] Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In ICCV, 2021. 14, 17", + "[136] Hugo Touvron, Matthieu Cord, and Hervé Jégou. DeiT III: Revenge of the ViT. In ECCV, 2022. 3", + "[137] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. In NeurIPS, 2023. 1, 20", + "[138] Michael Tschannen, Alexey Gritsenko, Xiao Wang, Muhammad Ferjad Naeem, Ibrahim Alabdulmohsin, Nikhil Parthasarathy, Talfan Evans, Lucas Beyer, Ye Xia, Basil Mustafa, Olivier Henaff, Jeremiah Harmsen, Andreas Steiner, and Xiaohua Zhai. SigLIP 2: Multilingual vision-language encoders with improved semantic understanding, localization, and dense features. arXiv:2502.14786, 2025. 2, 7, 8, 9, 10, 15, 16, 18, 19, 26, 32, 33", + "[139] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating CLIP-style models on dense captions. In CVPR, 2024. 27, 32", + "[140] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 10", + "[141] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 25", + "[142] Matthew Walmer, Saksham Suri, Kamal Gupta, and Abhinav Shrivastava. Teaching matters: Investigating the role of supervision in vision transformers. In CVPR, 2023. 
20", + "[143] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In NeurIPS, 2019. 3, 8, 9, 30, 31", + "[144] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv:2409.12191, 2024. 16, 20", + "[145] Wenhai Wang, Jifeng Dai, Zhe Chen, Zhenhang Huang, Zhiqi Li, Xizhou Zhu, Xiaowei Hu, Tong Lu, Lewei Lu, Hongsheng Li, Xiaogang Wang, and Yu Qiao. InternImage: Exploring large-scale vision foundation models with deformable convolutions. In CVPR, 2023. 19", + "[146] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, Tianxiang Jiang, Songze Li, Jilan Xu, Hongjie Zhang, Yifei Huang, Yu Qiao, Yali Wang, and Limin Wang. InternVideo2: Scaling foundation models for multimodal video understanding. In ECCV, 2024. 2, 9", + "[147] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, 2022. 4, 17", + "[148] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. STAR: A benchmark for situated reasoning in real-world videos. In NeurIPS, 2021. 14, 15, 16, 32", + "[149] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detector2, 2019. 29", + "[150] Jianxiong Xiao, Krista A. Ehinger, James Hays, Antonio Torralba, and Aude Oliva. SUN database: Exploring a large collection of scene categories. IJCV, 2014. 9", + "[151] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen tau Yih, Shang-Wen Li, Saining Xie, and Christoph Feichtenhofer. 
Altogether: Image captioning via re-aligning alt-text. In EMNLP, 2024. 5, 20", + "[152] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. In ICLR, 2024. 1, 3, 8, 15, 19, 20, 32, 33" + ], + "bbox": [ + 112, + 80, + 883, + 888 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[153] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. MSR-VTT: A large video description dataset for bridging video and language. In CVPR, 2016. 6, 7, 31, 32", + "[154] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arxiv:2407.10671, 2024. 16", + "[155] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. 
Qwen2.5 technical report. arXiv:2412.15115, 2024. 16, 33", + "[156] Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In ICLR, 2020. 3, 20", + "[157] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. TACL, 2014. 9, 14, 15, 16, 32", + "[158] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. CoCa: Contrastive captioners are image-text foundation models. TMLR, 2022. 1, 9, 20", + "[159] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. In ICLR, 2025, 20, 21", + "[160] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1, 4, 7, 9, 16, 19, 20, 22, 25, 26, 30", + "[161] Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M Ni, and Heung-Yeung Shum. DINO: DETR with improved denoising anchor boxes for end-to-end object detection. In ICLR, 2023. 19", + "[162] Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, 2016. 20", + "[163] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D. Manning, and Curtis P. Langlotz. Contrastive learning of medical visual representations from paired images and text. In MLHC, 2022. 20", + "[164] Long Zhao, Nitesh Bharadwaj Gundavarapu, Liangzhe Yuan, Hao Zhou, Shen Yan, Jennifer J. Sun, Luke Friedman, Rui Qian, Tobias Weyand, Yue Zhao, Rachel Hornung, Florian Schroff, Ming Yang, David A. Ross, Huisheng Wang, Hartwig Adam, Mikhail Sirotenko, Ting Liu, and Boqing Gong. 
VideoPrism: A foundational visual encoder for video understanding. In ICML, 2024. 9", + "[165] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. In WACV, 2025. 14, 15, 16, 32", + "[166] Liang Zheng, Yali Zhao, Shengjin Wang, Jingdong Wang, and Qi Tian. Good practice in cnn feature transfer. arXiv:1604.00133, 2016. 20", + "[167] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In CVPR, 2017. 19, 29", + "[168] Jinguo Zhu, Weiyun Wang, Zhe Chen, Zhaoyang Liu, Shenglong Ye, Lixin Gu, Yuchen Duan, Hao Tian, Weijie Su, Jie Shao, Zhangwei Gao, Erfei Cui, Yue Cao, Yangzhou Liu, Weiye Xu, Hao Li, Jiahao Wang, Han Lv, Dengnian Chen, Songze Li, Yinan He, Tan Jiang, Jiapeng Luo, Yi Wang, Conghui He, Botian Shi, Xingcheng Zhang, Wenqi Shao, Junjun He, Yingtong Xiong, Wenwen Qu, Peng Sun, Penglong Jiao, Lijun Wu, Kaipeng Zhang, Huipeng Deng, Jiaye Ge, Kai Chen, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. InternVL3: Exploring advanced training and test-time recipes for open-source multimodal models. arxiv:2504.10479, 2025. 2, 16" + ], + "bbox": [ + 112, + 80, + 885, + 887 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 42 + }, + { + "type": "ref_text", + "text": "[169] Zhuofan Zong, Guanglu Song, and Yu Liu. DETRs with collaborative hybrid assignments training. In ICCV, 2023. 
19", + "bbox": [ + 112, + 80, + 883, + 108 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 490, + 936, + 508, + 948 + ], + "page_idx": 43 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_model.json b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_model.json new file mode 100644 index 0000000000000000000000000000000000000000..33c36f4cfd2618c57cfde6ba2678a1982fa7c4f8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_model.json @@ -0,0 +1,7790 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.13181v2 [cs.CV] 28 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.099, + 0.833, + 0.151 + ], + "angle": 0, + "content": "Perception Encoder: The best visual embeddings are not at the output of the network" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.155, + 0.86, + 0.203 + ], + "angle": 0, + "content": "Daniel Bolya\\(^{1,\\ast}\\), Po-Yao Huang\\(^{1,\\ast}\\), Peize Sun\\(^{1,\\ast}\\), Jang Hyun Cho\\(^{1,2,\\ast,\\dagger}\\), Andrea Madotto\\(^{1,\\ast}\\), Chen Wei\\(^{1}\\), Tengyu Ma\\(^{1}\\), Jiale Zhi\\(^{1}\\), Jathushan Rajasegaran\\(^{1}\\), Hanoona Rasheed\\(^{3,\\dagger}\\), Junke Wang\\(^{4,\\dagger}\\), Marco Monteiro\\(^{1}\\), Hu Xu\\(^{1}\\), Shiyu Dong\\(^{5}\\), Nikhila Ravi\\(^{1}\\), Daniel Li\\(^{1}\\), Piotr Dólár\\(^{1}\\), Christoph Feichtenhofer\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.208, + 0.7, + 0.24 + ], + "angle": 0, + "content": "\\(^{1}\\)Meta FAIR, \\(^{2}\\)UT Austin, \\(^{3}\\)MBZUAI, \\(^{4}\\)Fudan University, \\(^{5}\\)Meta Reality Labs *Joint first author, †Work done during internships at Meta" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.257, + 0.861, + 0.485 + ], + "angle": 0, + "content": "We introduce Perception Encoder (PE), a 
state-of-the-art vision encoder for image and video understanding trained via simple vision-language learning. Traditionally, vision encoders have relied on a variety of pretraining objectives, each tailored to specific downstream tasks such as classification, captioning, or localization. Surprisingly, after scaling our carefully tuned image pretraining recipe and refining with our robust video data engine, we find that contrastive vision-language training alone can produce strong, general embeddings for all of these downstream tasks. There is only one caveat: these embeddings are hidden within the intermediate layers of the network. To draw them out, we introduce two alignment methods: language alignment for multimodal language modeling, and spatial alignment for dense prediction. Together, our PE family of models achieves best-in-class results on a wide variety of tasks, including (1) zero-shot image and video classification and retrieval, simultaneously obtaining 86.6 average zero-shot ImageNet robustness and 76.9 zero-shot Kinetics-400 video classification; (2) document, image, and video Q&A, enabling 94.6 DocVQA, 80.9 InfographicVQA, and 82.7 PerceptionTest with an 8B LLM; and (3) spatial tasks such as detection, tracking, and depth estimation, setting a new COCO state-of-the-art of 66.0 box mAP. To foster further research, we release our models, code, and novel dataset of synthetically and human-annotated videos." 
+ }, + { + "type": "text", + "bbox": [ + 0.139, + 0.503, + 0.573, + 0.517 + ], + "angle": 0, + "content": "Code: https://github.com/facebookresearch/perception_models" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.519, + 0.473, + 0.532 + ], + "angle": 0, + "content": "Dataset: https://ai.meta.com/datasets/pe-video/" + }, + { + "type": "text", + "bbox": [ + 0.785, + 0.519, + 0.86, + 0.534 + ], + "angle": 0, + "content": "Meta" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.573, + 0.272, + 0.59 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.602, + 0.888, + 0.664 + ], + "angle": 0, + "content": "For the last decade in computer vision, pretrained vision encoders have been the core building block for most applications requiring perception. From million-scale ImageNet [26] pretrained convolutional networks [42, 61, 81, 124, 131] to billion-scale web-pretrained transformers [19, 24, 29, 33, 54, 102, 130, 152, 158], the dominant strategy in vision has consistently been to adapt large-scale pretrained encoders to downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.67, + 0.888, + 0.777 + ], + "angle": 0, + "content": "There are many pretraining objectives today, each with distinct characteristics and each yielding representations better suited for specific tasks: vision-language contrastive losses [106, 160] learn a global vision and language embedding well-suited for zero-shot classification and retrieval as well as provide vision-language alignment for open-world [69, 94] and generative tasks [108, 114]; captioning losses [37, 137] learn to predict image descriptions using a language decoder, which transfers well to downstream multimodal language model (MLLM) tasks; and spatially self-supervised losses [44, 98] learn dense spatial correspondences without language supervision, making them useful for tasks requiring precise localization like object detection." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.783, + 0.888, + 0.845 + ], + "angle": 0, + "content": "Many works are now attempting to combine two or more of these techniques in different ways [19, 34, 35, 37, 45, 90, 110, 158]. While many have been successful, the complexity of these strategies grows exponentially with number of use cases, which can make scaling difficult. There has not yet been shown a single, simple, and easily scalable pretraining technique that can learn state-of-the-art features for all downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.851, + 0.889, + 0.913 + ], + "angle": 0, + "content": "In this work we discover that global vision-language contrastive learning alone can be one such approach. After building a state-of-the-art contrastive model for image and video, we found a surprising result: inside the model were specific features aligned to OCR, VQA, grounding, detection, depth estimation, and tracking. Compared to the state-of-the-art models with captioning [37] and spatially self-supervised [98] pretraining, our" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.949 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.114, + 0.079, + 0.885, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.215, + 0.885, + 0.286 + ], + "angle": 0, + "content": "Figure 1 Perception Encoder (PE) is a family of large-scale vision encoder models with state-of-the-art performance on a large variety of vision tasks. By using a robust contrastive pretraining recipe and finetuning on synthetically aligned videos, PE not only outperforms all existing models on classification and retrieval (§2), but it also internally produces strong, general features that scale for downstream tasks (§3). 
PE unlocks the ability for large-scale contrastive pretraining to transfer to downstream tasks with alignment tuning to capitalize on those general features (§4, §5)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.311, + 0.884, + 0.388 + ], + "angle": 0, + "content": "contrastive encoder has specific layers that, when used as frozen features, matches or exceeds the performance of the other two pretraining techniques on tasks they should be the best at. The only problem is—these features exist at different layers for each task. By exploiting this phenomenon with alignment tuning, we show it is possible to align these features to the end of the network in order to create state-of-the-art encoders for downstream MLLM and spatial tasks—all following the same easily scalable contrastive pretraining." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.394, + 0.885, + 0.576 + ], + "angle": 0, + "content": "We begin by building \\(\\mathrm{PE}_{\\mathrm{core}}\\) (Fig. 1, left), a large-scale contrastively pretrained model with state-of-the-art zero-shot performance on both images and video (\\(\\S 2\\)). To accomplish this, we first focus on developing a strong image-only contrastive pretraining recipe to extract general knowledge from billion-scale image-text data. Keeping the data and training FLOPs fixed, this recipe significantly improves upon vanilla CLIP in both absolute performance and robustness (\\(\\S 2.1\\)). We then use the resulting model as a frame-based encoder to develop a video data engine for generating well-aligned video captions. Finetuning on this synthetic video-text data substantially improves performance on both image and video classification and retrieval tasks (\\(\\S 2.2\\)). Motivated by this success, we release a large portion of the data used to train the engine: PE Video Dataset (PVD), consisting of 1M diverse videos with 120K human-refined annotations (\\(\\S 2.3\\)). 
Finally, we scale our robust image pretraining and well-aligned video finetuning strategy to 2B parameters to produce \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) (\\(\\S 2.4\\)), a single unified encoder that outperforms SigLIP2 [138] on zero-shot image tasks and InternVideo2 [146] on most zero-shot video tasks. We further transfer this power to smaller model scales through distillation." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.583, + 0.884, + 0.705 + ], + "angle": 0, + "content": "With the strongest image and video recognition model in hand, we shift our focus to downstream tasks. Remarkably, despite being pretrained with CLIP loss, we find that the intermediate layers of \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) can rival AIMv2-3B [37] on language tasks and DINOv2-g [98] on spatial tasks, both of which among the strongest pretrained models in their respective domains. Upon investigation, we attribute this capability to our robust image pretraining strategy, which appears to have unlocked the potential of contrastive pretraining to scale effectively for downstream tasks (§3). However, a challenge remains: the model does not naturally output these features, keeping them hidden internally. To address this, we introduce two alignment tuning methods (Fig. 1, right) to extract these strong, general features." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.711, + 0.884, + 0.773 + ], + "angle": 0, + "content": "First, in §4, we investigate the most effective technique to align features to the end of the network by adapting to a large language model. This language alignment enables us to construct \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\), which individually outperforms all other popular vision encoders for MLLM tasks. 
Moreover, when paired with our Perception Language Model (PLM) [21], the combination rivals the latest state-of-the-art MLLMs, like InternVL3 [168]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.779, + 0.884, + 0.87 + ], + "angle": 0, + "content": "Second, in §5, we identify a dichotomy in the layers optimal for spatial tasks. By visualizing the features and pinpointing the explicit reason for this dichotomy, we develop a straightforward spatial alignment approach: distilling from the model's own frozen features to achieve most of the alignment, complemented by a novel use of SAM 2 [111] for spatial correspondence distillation to refine the process. The resulting \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\) not only outperforms other popular models in depth estimation, tracking, and semantic segmentation, but also sets a new absolute state-of-the-art on COCO [76] detection with a much simpler decoder." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.877, + 0.884, + 0.907 + ], + "angle": 0, + "content": "With this family of checkpoints, Perception Encoder unlocks the potential to scale one simple pretraining method to solve many downstream vision tasks. We are releasing our models, code, and PE Video Dataset." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.407, + 0.1 + ], + "angle": 0, + "content": "2 Perception Encoder: Core" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.108, + 0.888, + 0.154 + ], + "angle": 0, + "content": "To build Perception Encoder (PE), we start by training a large-scale, robust, and highly performant vision-language contrastive model for image and video. We have two objectives: first, to enhance the scalability and data efficiency of contrastive training; and second, to create a unified model effective on both image and video." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.161, + 0.888, + 0.269 + ], + "angle": 0, + "content": "These goals are somewhat conflicting: image-text data is plentiful and training on images is efficient, but video-text data is scarce and video training is expensive. Thus, we decouple image and video training into two stages. We first develop a strong image pretraining recipe (§2.1) with several regularization techniques to create a robust starting point. Then we use the resulting image model as a frame encoder to develop a video data engine (§2.2) supported by our novel human-refined video-text dataset (§2.3) to generate aligned captions for video clips. Finally, we finetune the image encoder on the resulting aligned video data (§2.4). Using our data engine design, this short finetuning step substantially improves both image and video performance." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.28, + 0.367, + 0.296 + ], + "angle": 0, + "content": "2.1 Robust Image Pretraining" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.304, + 0.889, + 0.395 + ], + "angle": 0, + "content": "In the first stage of pretraining, we want to learn as much visual information as possible from a large set of image-text data. Notably, a unique quirk of contrastive training is the loss for a given sample depends on the other samples in the batch. Because each batch is different, there is potential to learn new information every time an example is sampled, even if that sample has been seen before. Thus, we find contrastive learning to benefit from a long training schedule. To exploit this, we design our pretraining recipe with high regularization, stability, and training efficiency in mind." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.406, + 0.44, + 0.724 + ], + "angle": 0, + "content": "Setup. (Fig. 2.1) We track our changes on a vanilla CLIP model using an OpenCLIP [51] ViT-L/14 model at 224 resolution as a baseline. 
We keep the training budget fixed to around 1T GFLOPs (i.e., a ZFLOP), and train on a fixed 2.3B image-text dataset curated using the MetaCLIP [152] text-only curation pipeline. For the baseline, we use a global batch size of \\(32\\mathrm{K}\\), class token, AdamW [83], and train for 12B samples seen. To assess the generality of the information learned during pretraining, we report not only zero-shot ImageNet val [26] results but also the average performance across a range of robustness metrics, including ImageNet val [26], ImageNet v2 [112], ObjectNet [4], ImageNet Adversarial [47], ImageNet Rendition [46], and ImageNet Sketch [143]. As observed with other pure CLIP models [33, 106, 152], the average robustness metric performance of this vanilla recipe is much lower than ImageNet val alone." + }, + { + "type": "image", + "bbox": [ + 0.471, + 0.395, + 0.895, + 0.585 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.459, + 0.598, + 0.886, + 0.724 + ], + "angle": 0, + "content": "Figure 2 Robust Image Pretraining. We tune our pretraining recipe (§2.1) to maximize performance on a fixed set of data, starting with an OpenCLIP [51] ViT-L/14 model. We report cumulative zero-shot classification results for each modification. The inner bars show robustness evaluation, calculated as the average of 6 robustness benchmarks [4, 26, 46, 47, 112, 143], and the outer bars show ImageNet val [26] alone. Several changes significantly improve robustness, indicating that ImageNet val scales more with data, while robustness can scale with refined training techniques." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.736, + 0.886, + 0.797 + ], + "angle": 0, + "content": "Progressive Resolution. (Fig. 2.2) To enable longer training, we first improve training efficiency. As shown in many works [70, 71, 79, 131, 136], vision encoders work well with a progressively increasing resolution schedule. 
Thus, we halve the training FLOPs while maintaining performance by evenly splitting the baseline 12B-sample run into 98, 154, and 224 resolution stages, with 4B samples per stage." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.808, + 0.886, + 0.869 + ], + "angle": 0, + "content": "Increasing Batch Size. (Fig. 2.3) We use the extra budget to double the batch size from \\(32\\mathrm{K}\\) to \\(64\\mathrm{K}\\), increasing the total samples seen from 12B to 24B. Larger batch size means a higher likelihood for there to be a non-trivially novel pair of samples, i.e., hard negatives. This is akin to increasing the \"task difficulty\" of CLIP and improves ImageNet val by \\(+0.6\\%\\) and robustness by double of that, \\(+1.1\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.88, + 0.888, + 0.913 + ], + "angle": 0, + "content": "LAMB Optimizer. (Fig. 2.4) We switch from AdamW to LAMB [156], which is known to stabilize large batch training. More importantly, LAMB allows us to train stably with a higher learning rate of \\(2 \\times 10^{-3}\\) compared" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.112 + ], + "angle": 0, + "content": "to the original \\(5 \\times 10^{-4}\\). We observe that starting with a high learning rate is important to allow the model to adapt to different resolutions. These factors combine for \\(+0.4\\%\\) on ImageNet val and \\(+0.7\\%\\) on robustness." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.123, + 0.888, + 0.185 + ], + "angle": 0, + "content": "Increasing Final Resolution. (Fig. 2.5) A classic finding is that parameters and resolution should be scaled together [36, 131]. Thus, we add a fourth 336 resolution stage at the end of training. To keep the training FLOPs the same, we adjust the training schedule to 10B samples at 98 resolution, 8B at 154, 4B at 224, and 2B at 336. 
While ImageNet val only increases by \\(+0.5\\%\\), robustness improves threefold, rising by \\(+1.4\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.196, + 0.888, + 0.227 + ], + "angle": 0, + "content": "RoPE. (Fig. 2.6) We add 2D RoPE [127] to each attention layer to improve extrapolation, keeping the original position embedding. 2D RoPE only improves ImageNet val by \\(+0.3\\%\\) but enhances robustness by \\(+0.9\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.238, + 0.888, + 0.285 + ], + "angle": 0, + "content": "Attention Pooling. (Fig. 2.7) We follow [160] in constructing the CLIP embedding using an attention probing transformer block. Surprisingly, we found keeping the class token as an input to this block is important for small model performance. Together, this improves ImageNet val by \\(+0.3\\%\\) and robustness by \\(+0.9\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.296, + 0.888, + 0.373 + ], + "angle": 0, + "content": "Tuned Data Augmentation. (Fig. 2.8) Despite training on billions of samples, we find data augmentation still important—especially for transfer to unlikely scenarios like in ObjectNet [4]. We add heavy random cropping, brightness/saturation jitter, and horizontal flip. Random cropping encourages using the entire caption, as not everything is in frame. Jitter helps low-light settings and documents. Horizontal flip improves natural images and does not hurt OCR (see §2.5). These improve robustness by \\(+0.7\\%\\), notably, ObjectNet by \\(+2.4\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.383, + 0.888, + 0.46 + ], + "angle": 0, + "content": "Mask Regularization. (Fig. 2.9) As regularization, we want the model to produce the same features if some patches are not visible. However, passing the CLIP gradients through masked images may negatively alter behavior on unmasked images. Thus, we convert MaskFeat [147] into a regularization loss by duplicating and masking 1/16th of the batch. 
At the output, the masked tokens are aligned to their unmasked counterparts by maximizing cosine similarity. Care is taken to ensure that the CLIP and masked gradients are disjoint." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.47, + 0.888, + 0.547 + ], + "angle": 0, + "content": "Scaling Behavior. (Figs. 3 and 4) In Fig. 3, we show the performance of our recipe (Fig. 2.9) vs. the original CLIP recipe (Fig. 2.1) across S/14, B/14, and L/14 models. For each benchmark, our recipe scales around the same rate or better than the original CLIP recipe. On some difficult datasets like ObjectNet [4] and ImageNet Adversarial [47], our recipe shows distinctly better scaling. This indicates that the improvements in performance were not at the cost of scalability, meaning we can further benefit from scaling the model size." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.559, + 0.242, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.559, + 0.368, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.559, + 0.498, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.559, + 0.624, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.559, + 0.753, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.559, + 0.882, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.645, + 0.888, + 0.674 + ], + "angle": 0, + "content": "Figure 3 Scaling Behavior (Model Size). Results before and after our recipe changes (Fig. 2) for S/14, B/14, and L/14 models. Our recipe improves scaling for difficult metrics like ObjectNet [4] and ImageNet Adeversarial [47]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.689, + 0.888, + 0.766 + ], + "angle": 0, + "content": "In Fig. 
4, we additionally show the performance of our recipe vs. the original CLIP recipe across L/14 models trained with 120K steps (one-third schedule), 240K steps (two-thirds schedule), and 360K steps (full ablation schedule). All models are their own training runs with full learning rate annealing and the progressive resolution schedule adjusted proportionally. We see nearly linear trends for our recipe on most datasets. This suggests we can train longer for more performance, even at L scale and with 24B samples seen already." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.778, + 0.242, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.778, + 0.37, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.376, + 0.778, + 0.498, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.778, + 0.626, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.778, + 0.754, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.778, + 0.882, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.864, + 0.888, + 0.906 + ], + "angle": 0, + "content": "Figure 4 Scaling Behavior (Training Steps). Results before and after our recipe changes for an L/14 model trained with 120K, 240K, and 360K steps, adjusting the learning rate and progressive resolution schedules accordingly. Despite our recipe being much stronger than the original, there is still room for further improvement by training longer." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.506, + 0.949 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.08, + 0.655, + 0.098 + ], + "angle": 0, + "content": "2.2 Bootstrapping a Video Data Engine with Perception Encoder" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.105, + 0.359, + 0.36 + ], + "angle": 0, + "content": "With a robust image pretraining recipe settled and its scaling behavior confirmed, our next step is to extend the image-only encoder to accommodate video and build a unified image-video model. Unlike web-scale image-text data, which comes in many cases with human-generated descriptive alt-text information, videos with aligned language annotation are inherently scarce. High-quality human-annotated captions for videos are even rarer. This scarcity presents a unique and significant challenge in training encoders capable of effectively processing video inputs." + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.13, + 0.887, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.28, + 0.887, + 0.351 + ], + "angle": 0, + "content": "Figure 5 Video Data Engine. To create aligned video-text data for contrastive training, we use a PE-based video captioner [21] to generate a holistic video caption and an image-level captioner [82] on sampled frames. We then provide those captions as well as the original video metadata to text-only LLM [82] to synthesize a single short, aligned caption optimal for contrastive training." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.361, + 0.886, + 0.422 + ], + "angle": 0, + "content": "Inspired by the recent success of image data engines [58, 64, 96, 111, 151], we extend this concept to develop a robust video data engine that generates well-aligned synthetic captions for a diverse set of videos, facilitating the training of a video encoder. 
This innovative approach represents the first large-scale exploration of its kind. In the following sections, we introduce the process of building our video data engine." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.429, + 0.888, + 0.507 + ], + "angle": 0, + "content": "To bootstrap our contrastive video finetuning, we focus on synthesizing video captions. We build our data engine in three stages: (1) we create a strong baseline video captioner, which we call the Perception Language Model (PLM), described in [21]; (2) we add additional high quality video data with human-refined captions to further enhance the captioner's quality; (3) we refine and summarize the generated video captions with an LLM to construct a large video dataset to use for the contrastive video finetuning of our Perception Encoder." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.516, + 0.888, + 0.579 + ], + "angle": 0, + "content": "Phase 1: Base Video Captioner (PLM). We build our data engine on an early version of PLM [21], a multimodal large language model with PE as the vision encoder and Llama [82] as the language decoder. We train PLM on a large-scale collection of open-access image and video datasets [21]. In total, the training dataset consists of 64.7M images and videos covering natural images, charts, documents, exocentric and egocentric videos." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.589, + 0.475, + 0.696 + ], + "angle": 0, + "content": "Phase 2: PLM + Refined Data. To further boost captioning performance, we collect a set of 265K videos (105K from PVD which we release, see §2.3), caption them with our base PLM model, and ask human raters to refine the captions1. We then fine-tune our base PLM model with this data, significantly improving captioning quality (see Tab. 1)." + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.589, + 0.878, + 0.635 + ], + "angle": 0, + "content": "
CaptionerAuroraCap [13]VCG Diverse [87]VCG Bench [86] Score
ScoreAccScoreAcc
PLM2.251.93.165.134.3
PLM + Human-Refined Data3.471.13.679.435.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.494, + 0.638, + 0.888, + 0.695 + ], + "angle": 0, + "content": "Table 1 Video Captioning. We use an early version of PLM-8B [21], consisting of our image-only PE encoder and a Llama decoder, for captioning. Adding human-refined data greatly boosts captioning performance (higher is better)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.707, + 0.886, + 0.799 + ], + "angle": 0, + "content": "Phase 3: LLM Summarization. We synthesize the final aligned video captions by incorporating the PLM video captions, Llama 3.2 [82] image-only frame captions, and the existing video metadata of video titles and descriptions (Fig. 5). Similar to image alt-text, video metadata contains knowledge often not covered by the image and video captioning models. Thus, combining the two leads to more comprehensive captions. We summarize video captions, frame captions, and video metadata together using the Llama 3.3 70B model to provide the final captions. The prompt used to generate the summary can be found in Appendix A.1." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.809, + 0.888, + 0.84 + ], + "angle": 0, + "content": "Using the Engine. Finally, we use the resulting data engine bootstrapped with an image-only checkpoint of PE to generate well-aligned, information-dense captions for a diverse set of 22M videos for contrastive finetuning." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.851, + 0.888, + 0.882 + ], + "angle": 0, + "content": "Training with Recaptioned Videos. Our goal is to develop a unified image and video encoder. To encode videos using our existing image encoder, we uniformly sample \\( N = 8 \\) frames from video clips and extract frame-level" + }, + { + "type": "page_footnote", + "bbox": [ + 0.129, + 0.89, + 0.685, + 0.904 + ], + "angle": 0, + "content": "1The annotators are instructed to remove, correct, and add information from the captions." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.158 + ], + "angle": 0, + "content": "embeddings with the image encoder. We then apply average pooling over these frame embeddings to obtain video embeddings, which are used for contrastive learning with encoded video captions by the text encoder. Despite being extremely simple, we find this technique surprisingly effective in producing a strong joint image-video encoder. We share this finding with previous studies [19, 84], which note that simple average pooling outperforms more complex pooling strategies like attention-based compression for video." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.169, + 0.434, + 0.41 + ], + "angle": 0, + "content": "Ablations. In Tab. 2, we conduct an ablation study on the components of the video data engine by finetuning an intermediate image-only checkpoint on 17M of the 22M videos recaptioned by our video data engine. The results show that the video data engine significantly enhances zero-shot classification and retrieval performance for both image and video benchmarks, compared to the image-only baseline encoder (first row). Notably, using the video data engine's video-level and frame-level captions provides significant improvements over relying solely on metadata such as video title and description (second row), highlighting the importance of building a robust video data engine to compensate for noise in web videos." + }, + { + "type": "table", + "bbox": [ + 0.461, + 0.173, + 0.882, + 0.284 + ], + "angle": 0, + "content": "
TitleDescriptionVideo CaptionFrame CaptionAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet vial [26]ImageNet vial [12]ObjectNet IV Classes [4]MS-COCO mrt→img [76]MS-COCO mrt→mrt [76]Average VideoKinetics 400 [55]Kinetics 400 [55]MSR-VTT mrt→vid [153]MSR-VTT mrt→mrt [153]48.1
72.683.377.885.849.466.850.969.768.438.027.3
75.483.278.287.147.366.056.074.173.539.037.3
78.283.578.486.856.074.360.973.873.447.648.8
✓*78.183.779.087.754.173.060.975.475.146.746.5
78.283.779.087.554.673.261.675.875.547.448.1
" + }, + { + "type": "text", + "bbox": [ + 0.456, + 0.287, + 0.886, + 0.399 + ], + "angle": 0, + "content": "Table 2 Video Data Engine Ablation. We ablate our video data engine in Fig. 5 by finetuning on an in-development image-only version of PE by averaging the frame embeddings to create a single video CLIP embedding. Video captions are generated by PLM trained with or without * human-refined data (see §2.3). Frame captions are generated by the Llama 3.2 vision model. Each component helps on different metrics, overall culminating in a huge boost to both image and video zero-shot performance." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.411, + 0.888, + 0.442 + ], + "angle": 0, + "content": "Our analysis reveals that the most critical components are the video metadata and PLM's video caption; however, all components are necessary to achieve peak performance in our video data engine." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.448, + 0.888, + 0.495 + ], + "angle": 0, + "content": "In Fig. 6, we investigate the impact of scaling recaptioned video data on a later checkpoint of the same image-only model as in Fig. 2. Notably, scaling synthetic video data demonstrates consistent improvement in both image and video benchmarks. Full results of this scaling experiment can be found in the Appendix 19." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.501, + 0.888, + 0.562 + ], + "angle": 0, + "content": "In the top row, scaling synthetic video data consistently improves performance on image benchmarks, with monotonic improvements of \\(+1.1\\%\\) in ObjectNet and \\(+1.6\\%\\) in ImageNet Adversarial. ImageNet val and ImageNet v2 have smaller gains, with accuracy increases of \\(0.3\\%\\) to \\(0.5\\%\\), plateauing at \\(\\sim 7\\mathrm{M}\\) samples. We also observe a significant boost to zero-shot retrieval (here, COCO [76]) of \\(+3.8\\%\\) to \\(+4.1\\%\\) top-1 recall." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.569, + 0.888, + 0.66 + ], + "angle": 0, + "content": "The video tasks listed in the bottom row demonstrate a consistent story. We observe a significant jump in performance between none and 3M videos across all video classification tasks, indicating that there is a domain gap for image-only models that hinders their ability to perform well on video out of the box. Further scaling synthetic video data leads to substantial performance gains in both video classification and retrieval. Video classification accuracy improves consistently by \\(+5.6\\%\\) to \\(+11.7\\%\\) without plateauing, while video retrieval shows significant improvements of \\(+7.7\\) to \\(+15.3\\) top-1 recall." + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.68, + 0.877, + 0.863 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.87, + 0.886, + 0.898 + ], + "angle": 0, + "content": "Figure 6 Video Data Scaling. Finetuning on videos recaptioned by the PE video data engine from 0M (baseline image-only model) to 17M samples consistently improves both image and video performance, both classification and retrieval." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.12, + 0.88, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.421, + 0.196, + 0.578, + 0.208 + ], + "angle": 0, + "content": "Category: Food Preparation" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.213, + 0.482, + 0.299 + ], + "angle": 0, + "content": "Model caption: \"The video shows a person making a cake. The person is seen mixing ingredients in a bowl, pouring the mixture into a pan, and adding chocolate chips. The video also shows the person using a whisk and a spatula to mix the ingredients and pour the mixture into the pan. 
The person is wearing a blue shirt and is seen holding a glass bowl and a spoon. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.213, + 0.875, + 0.3 + ], + "angle": 0, + "content": "Human refinement: \"The video shows a person making a chocolate cake. The person mixes ingredients in a glass bowl and pours the mixture into a baking pan. Then, the person adds chocolate chips on top. The video also shows the person mixing the ingredients with an electric whisk and scraping the mixture into the pan with a spatula. The person is wearing a blue shirt and is seen holding a glass bowl. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.31, + 0.886, + 0.366 + ], + "angle": 0, + "content": "Figure 7 PE Video Dataset Example. A sample from PVD, our released video-text dataset. Initial captions are generated by our video captioning model and then refined by human annotators. Annotators are instructed to add details and remove model hallucination. In this example, the model hallucination \"a spoon\" is removed; and more details such as \"glass bowl\" and the action \"scraping\" are added. See Appendix Fig. 18 for more." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.393, + 0.888, + 0.44 + ], + "angle": 0, + "content": "These experiments highlight the quality of our video data engine and its ability to significantly improve encoder performance, even with only a relatively modest 17M videos compared to the billions of images seen during pretraining. Our video data engine is a vital component in build a strong, unified image-video encoder." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.457, + 0.357, + 0.472 + ], + "angle": 0, + "content": "2.3 PE Video Dataset (PVD)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.482, + 0.888, + 0.529 + ], + "angle": 0, + "content": "For the benefit of the community, we release a new video dataset: PE Video Dataset (PVD).2 PVD comprises of 1M high-quality and diverse videos with accompanying tags and descriptions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.536, + 0.886, + 0.612 + ], + "angle": 0, + "content": "We additionally select 120K of these videos with the highest degree of motion to annotate with detailed captions by generating synthetic captions using our video captioner (§2.2) and employing 200 annotators to verify and refine them. We ask the human annotators to improve the synthetic captions by removing any hallucinations, correcting words that describe the video inaccurately, eliminating repetitive or redundant words to make the caption more concise, and adding any missing actions being performed in the video." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.619, + 0.666, + 0.709 + ], + "angle": 0, + "content": "We release two versions of annotations for the 120K PVD subset: (1) Human verified captions: extended summaries with an average length of 57.1 words that provide a high-level description of each video. These captions are suitable for CLIP-style training. (2) Long automated captions: detailed and fine-grained descriptions with an average length of 111.7 words that capture spatial and temporal events. These captions are ideal for fine-grained video understanding." + }, + { + "type": "table", + "bbox": [ + 0.69, + 0.624, + 0.882, + 0.692 + ], + "angle": 0, + "content": "
Videos998,862
Human Captions118,862
Total Duration4625 hrs
Duration (s)16.7±9.8
Human Caption Length57.1±25.4
Model Caption Length111.7±43.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.712, + 0.695, + 0.862, + 0.707 + ], + "angle": 0, + "content": "Table 3 PVD Statistics." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.717, + 0.886, + 0.763 + ], + "angle": 0, + "content": "In Fig. 7, we visualize a video example together with their model and human captions from PE Video Dataset (See Fig. 18 for more). The dataset statistics are summarized in Tab. 3. Finally, We use \\(105\\mathrm{K}\\) of these refined samples to improve the data engine (\\(\\S 2.2\\) phase 2) and \\(15\\mathrm{K}\\) as a high-quality video retrieval benchmark." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.774, + 0.888, + 0.88 + ], + "angle": 0, + "content": "PVD Benchmark. We use 15K of the human-refined video-caption pairs as a held-out test set, which we introduce as a new video retrieval benchmark, PVD Benchmark, to evaluate finegrained video-caption alignment. We follow the format of MSR-VTT [153] to construct the benchmark. We select videos from 10 different categories, including hand actions, object interactions, food preparation, work activities, outdoor scenes, animals, water scenes, object handling, close-up shots, and nature scenes, with an overall average caption length of 51.7 words (see Appendix A.2.3 for statistics). We use PVD Benchmark to evaluate SigLIP [160], SigLIP2 [138], InternVL [19], and PE models, and the results can be found in Tab. 7." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.129, + 0.889, + 0.493, + 0.903 + ], + "angle": 0, + "content": "\\(^{2}\\)PVD available at https://ai.meta.com/datasets/pe-video/" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.08, + 0.475, + 0.096 + ], + "angle": 0, + "content": "2.4 A Unified Encoder for Image and Video" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.105, + 0.884, + 0.134 + ], + "angle": 0, + "content": "Using a robust, scalable image pretraining recipe and video-pretraining data recaptioned by the proposed video data engine, in this section we present \\(\\mathsf{PE}_{\\mathrm{core}}\\) , a unified image-and-video encoder." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.147, + 0.549, + 0.223 + ], + "angle": 0, + "content": "Model Architecture. To capitalize on the promising scaling behavior observed in §2.1, we scale the largest \\(\\mathrm{PE}_{\\mathrm{core}}\\) model to 2B parameters3 (G scale). Tab. 4 shows the detailed model configuration of the vision and text transformers and the dimension of the output clip embedding space." + }, + { + "type": "table", + "bbox": [ + 0.577, + 0.146, + 0.885, + 0.224 + ], + "angle": 0, + "content": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP Dim
BVision0.09B768123072121024
Text0.31B102424409616
LVision0.32B1024244096161024
Text0.31B102424409616
GVision1.88B1536508960161280
Text0.47B128024512020
" + }, + { + "type": "table_caption", + "bbox": [ + 0.621, + 0.227, + 0.836, + 0.24 + ], + "angle": 0, + "content": "Table 4 PE Model Configurations." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.234, + 0.55, + 0.249 + ], + "angle": 0, + "content": "Smaller Model Distillation. To maximize the performance of" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.25, + 0.886, + 0.326 + ], + "angle": 0, + "content": "smaller models (B and L scales in Tab. 4), we employ a distillation finetuning approach [49] using \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) as the teacher. This process involves a short finetuning schedule where both the student and teacher models encode image and text inputs separately to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence, distilling multimodal relational knowledge from the teacher into the student." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.332, + 0.884, + 0.395 + ], + "angle": 0, + "content": "Notably, we find that using a smaller softmax temperature for the teacher's distributions, specifically \\(0.5 \\times\\) the temperature used for the student's distribution, significantly enhances the effectiveness of knowledge distillation. By leveraging the strong embeddings provided by \\(\\mathrm{PE}_{\\mathrm{core}} \\mathrm{G}\\), our short distillation finetuning schedule significantly boosts the performance of both B and L scale models of \\(\\mathrm{PE}_{\\mathrm{core}}\\) (see Appendix C.3)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.404, + 0.6, + 0.42 + ], + "angle": 0, + "content": "Model Training. The training process of \\(\\mathrm{PE}_{\\mathrm{core}}\\) involves three stages:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.427, + 0.884, + 0.474 + ], + "angle": 0, + "content": "1. Image pretraining. 
We scale up image pretraining to 5.4B publicly available image alt-text pairs curated with MetaCLIP [152] and a total of 86B samples seen to ensure convergence (58B for B and L). We use a global batch size of 131K, with progressive resolution from 98 to up to 448 depending on the model." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.48, + 0.885, + 0.572 + ], + "angle": 0, + "content": "2. Image and video finetuning. Following the initial pretraining, we subsequently finetune the model at max resolution with a short schedule for 50M samples on the image pretraining data (as cooldown) followed by 22M samples on the recaptioned videos with a smaller learning rate and batch size. The video captions are produced using the proposed video data engine (§2.2). For each video clip, we uniformly sample 8 frames, encode them, take their average to produce a single video embedding, and align them with the corresponding video captions using the same contrastive objective in image training." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.578, + 0.884, + 0.625 + ], + "angle": 0, + "content": "3. Smaller model distillation. We distill the 2B model (G scale) into smaller contrastive pretrained models at B and L scales under their final resolutions, using a short schedule that covers approximately 4B samples seen (\\(\\sim 8\\%\\) of the pretraining schedule) with a lower learning rate and no weight decay." + }, + { + "type": "list", + "bbox": [ + 0.131, + 0.427, + 0.885, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.631, + 0.658, + 0.647 + ], + "angle": 0, + "content": "The detailed training configuration and setups are listed in Appendix B.1.1." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.665, + 0.265, + 0.68 + ], + "angle": 0, + "content": "2.5 Core Results" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.694, + 0.886, + 0.815 + ], + "angle": 0, + "content": "Zero-Shot Image Results. In Tab. 
5, we present \\(\\mathrm{PE}_{\\mathrm{core}}\\) 's performance on zero-shot image benchmarks for classification and retrieval vs. the strongest existing models, including SigLIP2 [138] and proprietary models using JFT-3B [29], which is likely tuned for ImageNet. \\(\\mathrm{PE}_{\\mathrm{core}}\\) outperforms all other contrastive models across the board on all zero-shot tasks, including the highly competitive average of zero-shot ImageNet robustness metrics [4, 26, 46, 47, 112, 143]. This marks a significant achievement, as we are the first to accomplish this in over 3 years without access to Google's internal JFT-3B [29] or WebLI [17] datasets. And at the same time, \\(\\mathrm{PE}_{\\mathrm{core}}\\) also exceeds the existing state-of-the-art on image-text retrieval and significantly improves on fine-grained classification—the first to simultaneously hold state-of-the-art on all common zero-shot categories." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.822, + 0.884, + 0.869 + ], + "angle": 0, + "content": "By harnessing the power of our video data engine, training with a relatively small dataset of 22M videos and their corresponding synthetic captions leads to substantial gains in image benchmarks, with average general image classification improving by \\(+0.6\\%\\) with emphasis on more difficult benchmarks (notably \\(+1.2\\%\\)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.11, + 0.877, + 0.884, + 0.903 + ], + "angle": 0, + "content": "3We employ the setup described in §2.1 except for the additional class token (only used for L and B). Interestingly, we find using the same high learning rate \\((2 \\times 10^{-3})\\) to perform well for G. We also did not find scaling the text encoder to be beneficial." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.949 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.107, + 0.079, + 0.896, + 0.354 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Fine-Grained Classification
Avg Class.ImageNet w1 [26]ImageNet i2 [112]ObjectNet IV Classes [4]ImageNet adversarial [47]ImageNet Adversarial [48]ImageNet Renditions [46]ImageNet Sketch [143]Avg Fine.Food 107 [9]Flowers Oxford [97]Pets Oxford [100]Cars Stanford [59]Aircrafts FGC [88]Countries 2/1 [133]Scenes SUN397 [150]Satellite RESISC [20]Avg Retrieval1Zero-Shot Retrieval MS-COCO t+to ing [76]
Proprietary0.24B2246.6B84.385.786.380.682.385.695.776.1-95.191.297.9--------------------------------------------------0.24B2246.6B84.385.786.380.695.776.1-95.191.297.9-----------------------------------
BASIC [102]1.0B5764.8B85.786.380.695.776.1-95.191.297.9----------------------------72.651.266.380.492.585.786.380.695.776.1-------------------------------------------------MS-COCO t+to ing [76]MS-COCO img→to ing [76]MS-COCO img→to ing [75]
CoCa [158]1.0B5764.8B85.786.380.695.776.1-95.191.297.9---------------------72.651.266.380.492.585.786.380.695.776.1---0.24B2246.6B85.786.380.695.776.1-------------------------------------
LiT-22B [24]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.357, + 0.89, + 0.427 + ], + "angle": 0, + "content": "Table 5 Zero-Shot Image Results. Image zero-shot performance of \\(\\mathrm{PE}_{\\mathrm{core}}\\) compared to the state-of-the-art for both proprietary and open models. \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) is the first vision encoder to outperform the best models trained on the proprietary JFT-3B [29] and WebLI [17] on general classification. Moreover at all model sizes, \\(\\mathrm{PE}_{\\mathrm{core}}\\) obtains state-of-the-art results across general classification, retrieval, and finegrained classification. \\(\\dagger\\)Re-evaluated: DFN by [130]; SigLIP and SigLIP2 by us with the same benchmark settings if not reported in [138] (see Appendix B.1.2)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.452, + 0.889, + 0.514 + ], + "angle": 0, + "content": "ObjectNet, \\(+1.4\\%\\) ImageNet Adversarial) and fine-grained classification by \\(+1.0\\%\\) on average. Furthermore, due to the high level of detail and alignment of our synthetic captions, zero-shot retrieval is significantly boosted by \\(+3.6\\%\\) on average. These results emphasize that training with well-aligned video text data does not just improve video performance—it creates a strictly better model for both videos and images." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.525, + 0.321, + 0.646 + ], + "angle": 0, + "content": "Zero-Shot Video Results. We assess the performance of \\(\\mathrm{PE}_{\\mathrm{core}}\\) on zero-shot video benchmarks by employing the same model as a frame-based video encoder, utilizing 8 uniformly sampled frames, as described in §2.2." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.654, + 0.321, + 0.849 + ], + "angle": 0, + "content": "We present the corresponding video results in Tab. 6. Our base image encoder already outperforms all other image-only encoders on both zero-shot classification and retrieval, including SigLIP2-g-opt. 
With video finetuning, \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) significantly outperforms even native video models that use full temporal attention on video classification, and nearly matches the" + }, + { + "type": "table", + "bbox": [ + 0.34, + 0.529, + 0.892, + 0.768 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolution #FramesVideo DataZero-Shot ClassificationZero-Shot Retrieval
Avg Class.Kinetics 409 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 57 [62]Avg RetrievalMSR-VTT 304 [76]MSR-VTT 304 [76]MSVD 304 [76]MSVD 304 [76]MSVD 304 [76]ActivityNet 304 [76]ActivityNet 304 [76]
B Scale
CLIP [106]0.1B2248n/a54.358.455.146.168.943.229.230.424.240.557.29.113.2
CLIP4CLIP [84]0.1B22412n/a-------32.0-38.5---
SigLIP2-B/16†[138]0.1B2248n/a57.358.755.048.482.042.339.938.530.149.067.228.625.8
PEcoreB0.1B224822M63.965.665.155.884.648.249.947.647.350.476.739.038.4
L Scale
UMT-L [67]0.3B224825M------47.140.737.149.074.541.939.4
SigLIP2-L/16†[138]0.3B3848n/a64.165.362.556.886.749.344.741.531.453.774.235.931.5
PEcoreL0.3B336822M71.473.472.765.387.158.554.850.350.157.282.446.442.1
Unbounded Scale
InternVL [19]5.5B2248n/a-69.168.960.6---44.740.2----
InternVideo2 [146]1.0B2248102M70.773.172.864.988.853.959.951.950.958.183.360.454.8
VideoPrism-g* [164]1.1B28816619M-76.4-----39.771.0--52.750.3
SigLIP2-g-opt†[138]1.1B3848n/a68.269.867.061.890.751.846.643.134.255.874.638.333.4
PEcoreG (image only)1.9B4488n/a70.973.172.264.389.555.547.644.335.254.373.941.436.3
PEcoreG1.9B448822M74.876.976.169.190.761.158.751.249.959.785.454.751.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.34, + 0.77, + 0.888, + 0.841 + ], + "angle": 0, + "content": "Table 6 Zero-Shot Video Results. Video performance of \\(\\mathrm{PE}_{\\mathrm{core}}\\) compared to recent video and image encoders. \\(\\mathrm{PE}_{\\mathrm{core}}\\) obtains state-of-the-art in video classification and comparable performance on retrieval benchmarks while using only 22M videos. \\(^*\\) Proprietary models. \\({}^{+}\\mathrm{SigLIP2}\\) are evaluated by us with the same zero-shot prompts frame embedding averaging strategy (as in [19, 84, 106]). See Appendix B.1.2." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.85, + 0.889, + 0.911 + ], + "angle": 0, + "content": "state-of-the-art on video retrieval using a simple frame-level encoder. This result underscores the importance of our video data engine, resulting in \\(+3.9\\%\\) on average zero-shot video classification, and a massive \\(+11.1\\%\\) on retrieval. Moreover, \\(\\mathrm{PE}_{\\mathrm{core}}\\) does this with much less video data compared to other video-based approaches like InternVideo2 [146] and VideoPrism [164], highlighting the benefits of a joint image-video encoder." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.124, + 0.078, + 0.552, + 0.219 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Retrieval
ObjectNet [4]ObjectNet [4]Inaturalist 2017 [140]Dollar St 58 [39, 113]TextCaps img→cat [122]TextCaps Flip img→cat [122]PVD Bench img→vidPVD Bench vid→cat
SigLIP2-B/16 [138]0.1B22410B73.659.116.955.972.069.853.960.1
PEcore B0.1B2245.4B71.958.325.952.172.371.959.861.1
SigLIP2-L/16 [138]0.3B38410B84.473.226.757.678.076.261.967.1
PEcore L0.3B3365.4B84.774.335.359.678.578.364.765.2
InternVL-C [19]5.5B2245B80.667.219.458.272.367.863.465.1
SigLIP2-g-opt [138]1.1B38410B88.078.131.559.378.876.962.567.1
PEcore G1.9B4485.4B88.279.041.162.378.878.777.076.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.221, + 0.565, + 0.264 + ], + "angle": 0, + "content": "Table 7 Additional Zero-Shot Results. We present several additional zero-shot benchmarks from existing datasets and our own PVD (§2.3) to address evaluation gaps left by standard benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.604, + 0.079, + 0.885, + 0.206 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolutionDataEncoder Probing
ImageNet [26]ImageNet [26]ImageNet [26] Attention
DINOv2-g [98]1.1B224145M83.586.5\\( 87.2^{\\dagger} \\)
RADIOv2.5-g [45]1.1B518-85.3--
AIMv2 3B [37]2.7B4487.2B--89.5
InternVL-C [19]5.5B2245B-88.2-
EVA 18B [130]17.5B2242B-88.9-
\\( PE_{core}G \\)1.9B4485.4B86.889.589.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.602, + 0.209, + 0.888, + 0.266 + ], + "angle": 0, + "content": "Table 8 Encoder Probing Results. We evaluate \\(\\mathrm{PE}_{\\mathrm{core}}\\) G's frozen features using the typical probing methods to compare to models without zero-shot support. from [37]." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.291, + 0.889, + 0.339 + ], + "angle": 0, + "content": "Additional Zero-Shot Benchmarks. We further evaluate \\(\\mathrm{PE}_{\\mathrm{core}}\\) on an additional set of zero-shot classification and retrieval benchmarks we construct in Tab. 7 to address key gaps in common benchmarks. For comparison, we also evaluate SigLIP2 [138] and InternVL-C [19] on these benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.344, + 0.889, + 0.466 + ], + "angle": 0, + "content": "First, we note that the version of ObjectNet [4] that is standard to benchmark robustness (e.g., in Tab. 5) is not the full set. ObjectNet consists of 313 classes of objects in challenging and uncommon orientations, locations, and viewpoints. However, the standard version used for benchmarking is a 113 class subset of classes that overlap with ImageNet-1k [26]. Naturally, benchmarking in this way rewards performing well on ImageNet classes over generality. To remove this bias, we construct the full ObjectNet set with all classes and compare to the reduced ObjectNet set in Tab. 7. Surprisingly, we find that while \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) performs \\(+7.6\\%\\) over InternVL-C and only \\(+0.2\\%\\) over SigLIP2-g-opt on the reduced ObjectNet set, it performs \\(+11.8\\%\\) over InternVL-C and \\(+0.9\\%\\) over SigLIP2-g-opt on the full set of classes, highlighting PE's generality." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.472, + 0.889, + 0.655 + ], + "angle": 0, + "content": "Next, we include iNaturalist [140] as a zero-shot benchmark because of its level of specificity with 2,101 fine-grained long-tail classes. \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) outperforms the next best SigLIP2-g-opt model by \\(+9.6\\%\\) , emphasizing PE's long tail knowledge. We then evaluate PE's cultural diversity on Dollar Street \\([113]^4\\) , which consists of images of under-represented populations. Here too we find \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) to outperform existing methods, with \\(+3.0\\%\\) over SigLIP2-g-opt. Further, we test OCR performance by setting up TextCaps [122] as a retrieval dataset. Notably, \\(\\mathrm{PE}_{\\mathrm{core}}\\) performs on par or better than SigLIP, which is known for good OCR performance. This is potentially surprising, as the horizontal flip augmentation we used during robust pretraining (S2.1) is typically thought to hurt OCR performance. However, instead it seems to have given \\(\\mathrm{PE}_{\\mathrm{core}}\\) the ability to read backwards: we test the same TextCaps retrieval but with all images horizontally flipped. Other models suffer from this, but \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) 's performance only drops by \\(0.1\\%\\) . Finally, we evaluate \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) on the PVD benchmark (S2.3), a challenging video retrieval task on 15K diverse and human-refined videos. Here, \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) significantly outperforms InternVL [19] by \\(+13.6\\%\\) on text \\(\\rightarrow\\) video and \\(+9.5\\%\\) to SigLIP2 [138] on video \\(\\rightarrow\\) text." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.666, + 0.889, + 0.743 + ], + "angle": 0, + "content": "Frozen Encoder Probing Results. 
To compare against models that are not capable of zero-shot classification, we additionally evaluate \\(\\mathrm{PE}_{\\mathrm{core}}\\) using k nearest neighbors (following [98]), linear probing (following [19]), and attention probing (following [37]) on top of the ImageNet-1k [26] train set. We present these results in Tab. 8 and compare to other encoders using their reported numbers. In every case, \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) outperforms all existing open encoders, including those with significantly more parameters." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.753, + 0.889, + 0.815 + ], + "angle": 0, + "content": "Summary. \\(\\mathrm{PE}_{\\mathrm{core}}\\) , a unified image-video encoder, achieves state-of-the-art performance across zero-shot classification and retrieval on both images and videos on a wide variety of benchmarks. This synergy is made possible by our robust image pretraining recipe (§2.1) and powerful video data engine (§2.2), which together enable the model to effectively leverage the strengths of both image and video data at scale." + }, + { + "type": "page_footnote", + "bbox": [ + 0.129, + 0.899, + 0.694, + 0.914 + ], + "angle": 0, + "content": "4We use the version provided by [39] and re-evaluate all models to ensure a fair comparison." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.937, + 0.51, + 0.95 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.566, + 0.101 + ], + "angle": 0, + "content": "3 General Features in a Contrastive Disguise" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.114, + 0.888, + 0.16 + ], + "angle": 0, + "content": "\\(\\mathrm{PE}_{\\mathrm{core}}\\) puts up strong results on the tasks contrastive encoders are known for, like zero-shot classification and retrieval. But while those tasks are useful, they are only a small part of the vision ecosystem. 
What really matters is whether or not the features learned with our pretraining recipe are useful to downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.167, + 0.888, + 0.244 + ], + "angle": 0, + "content": "Today's common wisdom in the vision community cites that different pretraining methods result in features useful for different tasks: e.g., contrastive for classification, captioning for language modeling, and self-supervised learning for spatial tasks. To see how \\(\\mathrm{PE}_{\\mathrm{core}}\\) stacks up against against models with different pretraining techniques, we compare its frozen features to the state-of-the-art large-scale models for captioning (AIMv2-3B [37]) and self-supervised learning (DINOv2-g [98]) on a variety of downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.255, + 0.399, + 0.646 + ], + "angle": 0, + "content": "Layerwise Feature Analysis. We summarize the results of our frozen feature analysis in Fig. 8 for several downstream benchmarks in 3 categories: classification, language modeling, and spatial tasks. For classification, we probe each model using a randomly initialized cross attention transformer block. For language alignment, we use the Perception Language Model (PLM) [21] frozen encoder evaluation setup, learning a projector and finetuning a decoder-only LLM (see §4), and for spatial tasks we train with several different decoders (ViTDet [72] Mask-RCNN [43] with Absolute Win [7] for detection, DPT [109] for depth, and zero-shot feature correspondence for tracking [52]). For each experiment, we sweep over the layers of the model as the optimal features are not necessarily the last [18]. In each case, we use an equivalent image size (window size for detection) of \\(32 \\times 32\\) tokens. In each plot, we normalize performance by the maximum and minimum performance across models on that task." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.659, + 0.398, + 0.884 + ], + "angle": 0, + "content": "An Alignment Problem. This analysis reveals several insights. First, as expected, AIMv2 performs well at classification and the best at visual Q&A language tasks. Similarly, DINOv2 performs the well on spatial tasks like detection, depth, and even performs the best at grounding through an LLM. Then as already established by other works: DINOv2 lacks performance on OCR tasks [134]. This is no secret, but what is interesting is that its performance peaks in the middle of the network and then drops significantly by the end. And so does the performance of other models" + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.259, + 0.885, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.417, + 0.763, + 0.888, + 0.874 + ], + "angle": 0, + "content": "Figure 8 Layer Analysis. Evaluating intermediate layers as frozen features across tasks for different pretraining methods: captioning (AIMv2-3B [37], left), spatially self-supervised (DINOv2-g [98], middle), and our contrastive recipe \\(\\mathrm{(PE_{core}G}\\), right). Vertical lines denote the best layer and horizontal lines the best performance across models. As expected, AIMv2 performs well on language but not spatial, and DINOv2 performs well on spatial but not language. But surprisingly, intermediate layers of \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) perform well on both language modeling and spatial tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.885, + 0.822, + 0.902 + ], + "angle": 0, + "content": "for other downstream tasks (AIMv2: tracking, grounding, detection; DINOv2: VQ&A, grounding)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.95 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.158 + ], + "angle": 0, + "content": "\\(\\mathrm{PE}_{\\mathrm{core}}\\) exhibits similar behavior, but with unexpected results. Unlike the others, in earlier layers of the network \\(\\mathrm{PE}_{\\mathrm{core}}\\) performs well on all tasks, often matching or exceeding the leading models. Remarkably, PE has intermediate layers that perform near to or on par with AIMv2 for language tasks and DINOv2 for spatial tasks, despite being trained with contrastive loss. Depth estimation is particularly noteworthy, as contrastive encoders are not typically considered state-of-the-art in that area." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.164, + 0.888, + 0.256 + ], + "angle": 0, + "content": "However, in almost all cases this strong performance diminishes rapidly towards the end of the network. In fact, the performance of \\(\\mathrm{PE}_{\\mathrm{core}}\\) in the final layer is abysmal for certain tasks, such as LLM-based grounding (the reason for which will become apparent in §5). This behavior is less pronounced the closer the downstream task is to the pretraining method, suggesting an alignment problem. Specifically, a well-tuned large-scale contrastive model can learn general embeddings in the process of fitting its objective, but it fails to output them. Therefore, to reveal these embeddings, the model must be subsequently aligned to downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.267, + 0.888, + 0.314 + ], + "angle": 0, + "content": "Analysis. The finding that pure CLIP models possess features which match the performance of state-of-the-art pretraining methods in their specialized domains is new. In fact, recent work [31] has shown the opposite—that CLIP models fail to scale on downstream tasks. 
We next investigate how our approach yields these results." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.32, + 0.439, + 0.547 + ], + "angle": 0, + "content": "To start, we perform layerwise frozen feature analysis on COCO detection. \\(\\mathrm{PE}_{\\mathrm{core}}\\) was particularly \"peaky\" on this task in Fig. 8, with its best layer on par with DINOv2, but last layer significantly worse. We already ablated each change we made from vanilla CLIP in Fig. 2 using a ViT-L/14 model. So to retrace our steps, we run frozen feature analysis on those checkpoints. For efficiency, we perform this experiment at a lower resolution and only sample even layers. In Fig. 9, we report COCO box mAP for the last and best layers for each cumulative ablation, along with the index of the best layer. Further, we plot the layerwise performance for each change in Fig. 10." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.553, + 0.439, + 0.599 + ], + "angle": 0, + "content": "Surprisingly, the simple changes we made in §2.1 to construct our pretraining recipe overall improved the best layer's performance by" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.599, + 0.888, + 0.72 + ], + "angle": 0, + "content": "almost \\(10\\,mAP\\) over vanilla CLIP! Some changes like high resolution (5) and RoPE (6) improving spatial features is to be expected, but unexpectedly data augmentation (8) and especially progressive resolution (2) help considerably. It is possible that contrastive pretraining is prone to overfit to the \"global\" nature of the task through \"global tokens\" [23]. However, as the model cannot maintain global tokens in the same place due to the resolution progressively changing, it is forced to be more robust. Also of note is that both progressive resolution (2) and attention pooling (7) move the argmax layer deeper into the network (rightmost column of Fig. 9). Attention pooling in particular alters the whole shape of the layerwise performance curve (Fig. 
10), while the other changes typically only raise or lower it." + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.734, + 0.353, + 0.848 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.86, + 0.35, + 0.888 + ], + "angle": 0, + "content": "Figure 10 Layer Analysis corresponding to the results presented in Fig. 9." + }, + { + "type": "text", + "bbox": [ + 0.37, + 0.727, + 0.888, + 0.894 + ], + "angle": 0, + "content": "Potentially more interesting is what did not improve performance: specifically, increasing the batch size (3) and using LAMB with a high learning rate (4). Both of these changes explicitly help the model fit the CLIP loss better, which after a certain point may not improve the general features. Moreover, while the best layer overall improved significantly, the last layer performance stagnated after (2). This suggests that constructing the global CLIP token requires a substantial \"decoder\" (in this case, 6 layers for the final L/14 model). Although the features of this decoder are beneficial for some tasks (e.g., Visual Q&A as shown in Fig. 8), they are not general. Nevertheless, this does not prevent the model from learning general features; it merely limits their expression in the output." + }, + { + "type": "image", + "bbox": [ + 0.471, + 0.329, + 0.895, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.46, + 0.533, + 0.888, + 0.589 + ], + "angle": 0, + "content": "Figure 9 The Downstream Effects of Robust Pretraining. The ViT-L/14 checkpoints from Fig. 2 evaluated as frozen features on COCO [76] using Mask R-CNN [43]. We report the last layer performance, best layer performance, and the best layer's index." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.082, + 0.396, + 0.203 + ], + "angle": 0, + "content": "Scaling Behavior. 
Finding a simple, easily scalable vision pretraining method that produces generally useful features has been the white whale of the vision community for a while. Evidently, our robust recipe can enable contrastive pretraining to produce general features. So that begs the question, \"does it scale?\"" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.209, + 0.396, + 0.346 + ], + "angle": 0, + "content": "We can answer this question in the same way: by performing frozen feature layer analysis of our S/14, B/14, and L/14 scaling ablation checkpoints from Fig. 3. We report the result of that analysis in Fig. 11. We also include our final \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) model using the same setup, but note this is an estimate as our ablation and final schedules are different." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.353, + 0.399, + 0.611 + ], + "angle": 0, + "content": "Immediately, we see a stark contrast between the scaling behavior of the vanilla CLIP recipe and ours. While the vanilla recipe quickly plateaus at L scale (300M), the best layer of our robust pretraining recipe demonstrates scaling to G scale (2B) and potentially beyond—despite being trained with a decidedly non-spatially aligned global contrastive loss. However, this is the best layer. The last layer performance still stagnates for both the vanilla recipe and ours. This may be why prior work [31] finds contrastive pretraining to not scale for downstream tasks—CLIP loss obfuscates its general features even with our recipe, placing them several layers deep." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.617, + 0.396, + 0.844 + ], + "angle": 0, + "content": "However, this is just for a single spatial task. To see whether the trend is consistent, we repeat this scaling analysis on a wide variety of downstream language modeling tasks using the same frozen evaluation setup as Fig. 8 and report the results in Fig. 12. 
Surprisingly, the simple change in pretraining recipe improves scaling for most language tasks as well—including output-side grounding (RefCOCO). Note that in this benchmarking setup, the LLM never sees videos during training so the Video Q&A per-layer results are noisy. Yet, the best layer trend is still the same." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.851, + 0.395, + 0.866 + ], + "angle": 0, + "content": "Clearly, contrastive pretraining with our" + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.071, + 0.627, + 0.082 + ], + "angle": 0, + "content": "Object Detection" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.085, + 0.583, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.085, + 0.723, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.731, + 0.077, + 0.882, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.188, + 0.887, + 0.244 + ], + "angle": 0, + "content": "Figure 11 The Downstream Scalability of Robust Pretraining. Left: frozen feature layer analysis of the S/14, B/14, and L/14 models from Fig. 3 using the same setup as Fig. 9. Right: scaling behavior of the best layer for each model. Note: G is our final model and has a different schedule." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.551, + 0.255, + 0.608, + 0.265 + ], + "angle": 0, + "content": "OCR Q&A" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.268, + 0.582, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.268, + 0.722, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.261, + 0.888, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.366, + 0.61, + 0.376 + ], + "angle": 0, + "content": "Visual Q&A" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.379, + 0.582, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.379, + 0.722, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.372, + 0.888, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.549, + 0.477, + 0.609, + 0.488 + ], + "angle": 0, + "content": "Captioning" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.49, + 0.582, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.49, + 0.722, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.483, + 0.887, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.589, + 0.608, + 0.599 + ], + "angle": 0, + "content": "Grounding" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.602, + 0.582, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.602, + 0.722, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.594, + 0.887, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.549, + 0.7, + 0.609, + 0.709 + ], + "angle": 0, + "content": "Video Q&A" + }, + { + "type": "image", + 
"bbox": [ + 0.434, + 0.713, + 0.582, + 0.804 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.712, + 0.722, + 0.804 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.702, + 0.887, + 0.804 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.816, + 0.887, + 0.859 + ], + "angle": 0, + "content": "Figure 12 Further Scalability Analysis. We repeat the analysis from Fig. 11 on a wide range of downstream tasks by adapting to a language model. Each category is an average of several downstream tasks (see §4)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.866, + 0.886, + 0.913 + ], + "angle": 0, + "content": "robust recipe produces strong general features that scale. However, these features are not going to be much use stuck in the middle of the network. To remedy this, in the remaining sections we will discuss methods for aligning these general features to the output of the network for both language modeling and spatial tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.11, + 0.082, + 0.56, + 0.101 + ], + "angle": 0, + "content": "4 Perception Encoder: Language Alignment" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.107, + 0.885, + 0.184 + ], + "angle": 0, + "content": "In §3 we have seen that \\(\\mathrm{PE}_{\\mathrm{core}}\\) already possesses useful features for vision-language modeling. In this section, we lift these features through alignment tuning to construct a new encoder, \\(\\mathrm{PE}_{\\mathrm{lang}}\\), specialized for multimodal large language models (MLLMs). Our principle is to design not only the most performant, but also the most general vision encoder for use in MLLM development. 
To this end, we want a single language-aligned encoder that performs well across language models, across input resolutions, and for a wide variety of MLLM tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.195, + 0.888, + 0.288 + ], + "angle": 0, + "content": "MLLM Evaluation Tasks. In this section, our main testbed is to adapt vision encoders to MLLMs and test on various MLLM tasks. We evaluate the downstream performance of each MLLM across five task categories: (1) OCR, Chart, Document Q&A on ChartQA [165], DocVQA [91], InfoVQA [92] and AI2D [57]; (2) Visual Q&A on TextVQA [125], OK-VQA [118], POPE [73], and VQAv2 [40]; (3) Captioning on Flicker [157], COCO [76], and No Cap [1]; (4) Video Understanding on VideoMME [38], STAR [148], TGIF-QA [53], EgoSchema [89], MVBenchmark [68], and PerceptionTest [105]; and finally (5) Grounding on RefCOCO [56]." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.299, + 0.395, + 0.315 + ], + "angle": 0, + "content": "4.1 Language Alignment Method" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.322, + 0.885, + 0.43 + ], + "angle": 0, + "content": "We begin by searching for the optimal language alignment method. We design our alignment tuning based on the midtraining stage of Perception Language Model (PLM) [21], which is to adapt \\(\\mathrm{PE}_{\\mathrm{core}}\\) to a pretrained decoder-only LLM (Llama 3 [82]) connected by a vision projector. We start with \"warmup\" training stage with autoregressive next-token prediction loss on 1M image-text samples from pretraining, where everything but the projector is frozen. Then, we proceed to finetune all parameters on 70M data samples [21] covering natural images, documents/charts/diagrams, and videos, using the same next-token prediction loss. After completing this language alignment, we extract the vision encoder from the model and refer to it as \\(\\mathrm{PE}_{\\mathrm{lang}}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.436, + 0.569, + 0.527 + ], + "angle": 0, + "content": "To arrive at the optimal training configuration presented in PLM [21], we first conduct ablation studies using a 20M subset of the data. In Tab. 9, we ablate the LLM sizes, training parameters, vision projector types, output layers to project, and encoder regularization. We evaluate across OCR Q&A, Captioning, Visual Q&A, and Video Q&A and find the best configuration." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.534, + 0.568, + 0.595 + ], + "angle": 0, + "content": "LLM Setup. We explore different scales (1B or 3B parameters) and freezing weights of the LLM. We observe that going from 1B to 3B parameters increases average score by 1.6 points \\((76.5\\rightarrow 78.1)\\). Unfreezing the LLM boosts this number to 78.4." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.602, + 0.568, + 0.647 + ], + "angle": 0, + "content": "Vision Projector. Using a 2-layer MLP vision projector instead of a linear layer improves the average score from 77.2 to 78.1, while only adding few parameters (13.5M → 27M)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.655, + 0.568, + 0.699 + ], + "angle": 0, + "content": "PE Output Layer. As shown in §3, \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) has intermediate layers that perform significantly better than the last layer when used as features for certain tasks. However, it is not clear if that" + }, + { + "type": "table", + "bbox": [ + 0.594, + 0.443, + 0.892, + 0.644 + ], + "angle": 0, + "content": "
LLM scaleLLM unfrozen Regularization?ProjectorLayerAvg.OCR Q&A Average of 4
Average of 3Captioning Average of 3
LLM Setup
1BMLP4776.560.7115.176.054.0
3BMLP4778.165.9115.776.654.1
3BMLP4778.465.8117.676.353.7
Vision Projector
3BLinear4777.264.5114.176.553.7
3BMLP4778.165.9115.776.654.1
PE Output Layer
3BMLP5075.956.6116.776.553.7
3BMLP4778.165.9115.776.654.1
3BMLP4176.965.5112.875.453.9
PE Regularization
3BMLP4779.969.0117.577.455.6
3BMLP4780.168.7118.377.056.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.591, + 0.647, + 0.886, + 0.689 + ], + "angle": 0, + "content": "Table 9 Language Alignment. We find the best configuration to language align \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) using autoregressive language training." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.7, + 0.886, + 0.731 + ], + "angle": 0, + "content": "same behavior applies when finetuning. We test applying the projector to layers 41, 47, and 50 (the last layer), and find that layer 47 works best. Incidentally, this is also the optimal layer for frozen VQ&A in Fig. 8." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.738, + 0.888, + 0.784 + ], + "angle": 0, + "content": "PE Regularization. We apply LayerScale [135] and DropPath [50] to the vision encoder during the alignment, for stabilizing training. This improves the 78.1 average score to 79.9 (+1.8 points). Unfreezing the LLM boosts this number further to 80.1. We choose this configuration (last row) as our final alignment setup." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.79, + 0.888, + 0.868 + ], + "angle": 0, + "content": "To construct \\(\\mathrm{PE}_{\\mathrm{lang}}\\), we scale this recipe up the 70M samples mentioned above (more details in [21]). In summary, we use a pretrained Llama3.2 3B, unfrozen, with a 2-layer MLP as a vision projector on top of layer \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) layer 47 (with the last 3 discarded) and regularize the encoder with LayerScale and DropPath. Compared to the 20M sample ablation setting in Tab. 9, the final \\(\\mathrm{PE}_{\\mathrm{lang}}\\) trained on 70M total samples gives another +2.1 points to 82.2 on the average across OCR Q&A, Captioning, Visual Q&A, and Video Q&A." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.878, + 0.888, + 0.91 + ], + "angle": 0, + "content": "Effects. 
The goal of alignment tuning is to lift the strong features found in intermediate layers of \\(\\mathrm{PE}_{\\mathrm{core}}\\) described in §3 to the end of the network. To see if we actually accomplished that, we perform the same layerwise" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.511, + 0.385 + ], + "angle": 0, + "content": "analysis as in Fig. 8 on our final \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) model and compare it to the original \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) checkpoint it was initialized from. We present the results of this analysis in Fig. 13, and immediately we see that language alignment was a success: across all categories, the performing layer for the aligned model was the last, no matter the performance of the original checkpoint. Notably, our \\(\\mathrm{PE}_{\\mathrm{lang}}\\) training mix did not contain grounding data, which means that this significantly lifted grounding performance is entirely due to the strong intermediate grounding features in \\(\\mathrm{PE}_{\\mathrm{core}}\\) now being aligned to the end of the network. Moreover, specific domains such as OCR Q&A that were represented in the training mix see a significant boost to performance compared to even the best layer of \\(\\mathrm{PE}_{\\mathrm{core}}\\), which was already strong. Thus, with an order of magnitude fewer samples compared to pretraining, we were able to language align \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) to create a single, strong encoder for all visual language modeling tasks. Following this success, we align \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}\\) in a similar manner to construct \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}\\) (see [21])." 
+ }, + { + "type": "image", + "bbox": [ + 0.547, + 0.086, + 0.716, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.087, + 0.885, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.549, + 0.203, + 0.714, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.721, + 0.202, + 0.887, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.533, + 0.328, + 0.888, + 0.385 + ], + "angle": 0, + "content": "Figure 13 Language Alignment. We analyze how language alignment changes the internal features of PE. Similar to our \\(\\mathrm{PE}_{\\mathrm{core}}\\) analysis in Fig. 12, we extract \\(\\mathrm{PE}_{\\mathrm{lang}}\\) and adapt each layer to a new LLM." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.402, + 0.51, + 0.419 + ], + "angle": 0, + "content": "4.2 Comparisons with Existing Vision Encoders" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.426, + 0.888, + 0.504 + ], + "angle": 0, + "content": "We compare \\(\\mathrm{PE}_{\\mathrm{core}}\\) and \\(\\mathrm{PE}_{\\mathrm{lang}}\\) with other vision encoders that are popular choices in MLLM literature: MetaCLIP [152], SigLIP2 [138], CLIP [106], AIMv2 [37], DINOv2 [98], and InternViT2.5 [18]. Overall, these encoders span several different pretraining losses (e.g., contrastive, captioning, self-supervised, and mixed supervision), encoder sizes (from 300M to 6B parameters), and resolutions (from 224 to 512). For all vision encoders, we find the best intermediate layers to train MLLM for fair comparison (more in Appendix B.2)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.514, + 0.888, + 0.561 + ], + "angle": 0, + "content": "MLLM Benchmarking Setup. We connect each vision encoder, including \\(\\mathrm{PE}_{\\mathrm{lang}}\\), to a language decoder with a fresh 2-layer MLP projector. 
Similar to the alignment stage, we first train only the projector on a subset of 1M image-text pairs from pretraining. Then, we train both the projector and LLM on 2.6M visual Q&A pairs," + }, + { + "type": "table", + "bbox": [ + 0.108, + 0.569, + 0.895, + 0.838 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [11]Avg. Ground RefLOCOg+ [56]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGF-IQA Acc. [53]EgoScheme Acc. [89]MV-Bench Acc. [68]PerceptionTest Acc. [105]
CharQA Acc. [165]DocVQA Acc. [91]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1444.947.933.028.770.268.447.662.586.976.5110.587.5130.0114.160.653.946.151.066.458.649.451.9
MetaCLIP-G [152]1.8B224/1444.847.633.127.970.668.848.263.586.576.9111.186.5132.1114.860.553.145.050.766.456.048.751.9
PElang G†1.7B*224/1453.761.347.132.274.171.855.165.386.879.8116.491.0136.9121.265.755.547.355.768.959.648.652.9
576 Tokens per Image
CLIP [106]0.3B336/1453.561.749.532.870.172.760.763.987.378.9113.392.0132.9115.065.054.246.352.168.657.448.552.3
AIMv2-L [37]0.3B336/1453.361.648.032.171.473.762.764.387.780.1115.290.9135.6119.263.352.544.350.967.554.444.953.2
AIMv2 L Dist. [37]0.3B336/1453.761.149.431.572.774.162.864.888.380.3117.894.7137.5121.262.653.844.352.465.057.450.053.6
SigLIP2-so [138]0.4B384/1658.969.058.335.273.176.869.867.288.781.6116.592.1137.7119.867.454.545.553.167.257.649.354.5
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1466.976.873.641.176.176.268.566.089.181.3119.796.1139.6123.468.958.148.758.970.561.852.755.9
1024 Tokens per Image
InternViT 2.5 L [18]0.3B448/1460.674.159.235.973.174.265.464.487.679.6112.388.4133.7114.966.950.645.244.862.754.246.050.5
SigLIP2-so [138]0.4B512/1663.372.169.339.072.777.974.866.089.081.8117.493.5138.3120.269.655.846.255.467.062.050.054.5
PEcore L0.3B448/1459.468.762.536.669.774.767.764.388.378.7112.789.6133.4114.959.750.941.751.261.652.647.450.6
PElang L0.3B448/1471.181.081.946.475.077.173.065.589.380.8117.394.3137.3120.170.556.547.057.268.059.852.354.7
DINOv2-g [98]1.1B448/1430.019.614.724.261.561.019.360.488.675.8109.486.5131.6110.164.949.539.752.160.146.847.450.8
AIMv2 3B [37]2.7B448/1448.940.553.933.967.273.064.164.085.278.9115.793.8135.2118.136.154.645.154.566.755.451.754.3
InternViT2.5-6B [18]5.5B448/1459.972.359.435.272.575.568.964.988.280.2115.092.2136.3116.368.049.644.547.062.645.848.948.5
PEcore G1.9B448/1460.869.965.436.771.173.365.960.788.478.0112.591.6133.6112.466.652.042.353.162.951.448.853.6
PElang G†1.7B*448/1472.480.584.448.376.478.175.265.490.181.8120.196.6140.0123.671.358.048.060.169.462.052.456.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.841, + 0.886, + 0.898 + ], + "angle": 0, + "content": "Table 10 MLLM Results with Llama 3.18B. We compare various vision encoders at their native resolution using Llama 3.1-instruct 8B [82] as the language model. The table compares models of similar class in number of vision tokens and parameters. \\(\\mathrm{PE}_{\\mathrm{lang}}\\) shows strong performance across all benchmarks, including against models \\(3\\times\\) its size. \\({}^{*}\\mathrm{PE}_{\\mathrm{lang}}\\) has 1.7B parameters since we discard the last 3 layers during language alignment. \\(\\dagger\\) Interpolated without extra training." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.128 + ], + "angle": 0, + "content": "image captions, and image grounding samples (see Appendix B.2 for details). We benchmark at the native resolution of each encoder (with higher resolution tiling results in Appendix C.4). Finally, we ablate over two language decoders, Llama 3.1 8B [82] and QwenLM 2.5 7B [155], to measure generalization across LLMs." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.139, + 0.888, + 0.278 + ], + "angle": 0, + "content": "Results. Tab. 10 shows benchmarks results for native resolution input across existing encoders, \\(\\mathrm{PE}_{\\mathrm{core}}\\) and \\(\\mathrm{PE}_{\\mathrm{lang}}\\). Notably, AIMv2 [37], InternViT2.5 [18], SigLIP2 [138] and \\(\\mathrm{PE}_{\\mathrm{lang}}\\) are trained jointly with a language decoder using next token prediction objective, and thus they perform better overall compared to the base contrastive and self-supervised models across all the metrics. 
However, \\(\\mathrm{PE}_{\\mathrm{lang}}\\) uses a fraction of the training FLOPs for language alignment tuning, while significantly outperforming all vision encoders by large margin (an average of \\(+3.5\\) points for G and \\(+2.0\\) points for L). Similarly, when tiling with 4 tiles and 1 thumbnail (see Appendix Tab. 30), both \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}\\) and \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) outperform all existing vision encoders, including InternViT2.5 [18], which was specifically pretrained in a tiling setting and with grounding data. Appendix C.4, shows a breakdown of the RefCOCO results, as well as results for tiling with higher resolution." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.286, + 0.889, + 0.379 + ], + "angle": 0, + "content": "Transferability. As \\(\\mathrm{PE}_{\\mathrm{lang}}\\) is aligned with Llama 3.2-instruct 3B, we conduct a separate set of experiments to check if our model performs well with a different base LLM. In Tab. 11 we repeat the native resolution comparison with QwenLM 2.5 7B [155]. Interestingly, \\(\\mathrm{PE}_{\\mathrm{lang}}\\) not only outperforms all vision encoders in this setting, but it also outperforms InternViT2.5 [18], which is specifically aligned to QwenLM 2 [154] throughout midtraining. In fact, \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) with QwenLM even improves its performance with Llama in some cases like with OCR Q&A and video benchmarks, emphasizing the generality of our language alignment." + }, + { + "type": "table", + "bbox": [ + 0.108, + 0.386, + 0.896, + 0.585 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Avg. Ground, ReCOC%+ [56]
CharQATextVQAFlicker CIDEr [157]Avg. Ground, ReCOC%+ [56]
Acc. [165]Acc. [125]COCO CIDEr [76]STAR Acc. [148]
DocVQADocVQANo Cap CIDEr [1]EGoSema Acc. [89]
Acc. [91]Acc. [92]Avg. Ground, ReCOC%+ [56]VideoOME Mec Aoc. [38]
Aoc. [57]Aoc. [73]Avg. VideoStAR Acc. [68]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1660.572.059.136.774.366.269.065.489.281.1116.391.6137.3120.070.057.051.355.866.061.051.955.7
SigLIP2-g-opt [138]1.1B384/1660.871.060.436.775.276.870.365.689.581.8118.896.4139.0121.169.958.352.057.668.162.052.857.4
PElang G†1.7B*336/1466.877.572.441.176.476.067.965.489.181.5118.894.6139.5122.370.160.254.661.769.863.654.357.2
1024 Tokens per Image
InternViT2.5 [18]0.3B448/1460.375.461.136.268.474.265.663.787.879.5112.188.5133.5114.168.155.850.354.766.659.050.653.8
SigLIP2-so [138]0.4B512/1666.377.271.942.473.977.974.265.689.981.8117.193.0138.0120.370.555.950.357.367.262.650.347.4
PEcore L0.3B448/1463.573.967.440.572.275.769.264.089.480.2113.388.7135.2115.966.557.349.657.867.760.852.355.5
PElang L0.3B448/1470.280.680.746.073.576.872.864.189.481.0116.493.4137.6118.170.458.351.659.867.462.253.455.4
DINOv2 [98]1.1B448/1431.321.714.724.664.361.018.959.588.976.9110.187.3132.1110.869.354.346.956.563.456.849.752.2
AIMv2 3B [37]2.7B448/1466.076.770.541.475.277.974.266.289.481.9119.296.4139.2122.067.656.345.958.067.860.851.453.9
InternViT2.5 [18]5.5B448/1464.278.265.339.673.676.470.164.589.381.7117.695.9138.4118.672.856.150.359.167.356.651.152.2
PEcore G1.9B448/1464.875.968.841.672.975.267.962.489.780.7113.191.7135.2112.370.557.048.758.366.960.852.954.5
PElang G1.7B*448/1472.981.683.749.576.777.974.964.590.381.9118.994.6139.8122.372.160.454.162.568.366.654.256.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.588, + 0.888, + 0.618 + ], + "angle": 0, + "content": "Table 11 MLLM Results with QwenLM 2.5 7B. Same setting as Tab. 10, but with QwenLM2.5 7B [155] as the language model. Although \\(\\mathrm{PE}_{\\mathrm{lang}}\\) is aligned to Llama3.2 3B, the language alignment transfers well to a different language model." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.632, + 0.888, + 0.756 + ], + "angle": 0, + "content": "System-Level MLLM Comparison. In Tab. 12, we conduct a system-level comparison to the state-of-the-art open-access MLLMs: LLaVA-OneVision 7B [66], Gemma3 12B [132], Molmo-D 7B [25], Qwen2 VL 7B [144], InternVL 2.5 8B [18] and the very recent InternVL 3 8B [168]. Each baseline uses a contrastively pretrained ViT (SigLIP-so400M [160], CLIP-L [106], DFN-H [33], and InternViT 2.5 300M [18]). For our PLM-8B we use \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) as the vision encoder with 36 tiles for images and 32 frames for video and Llama 3.1-instruct 8B as the language decoder (more details in [21]). We show numbers from their respective works or evaluate them ourselves if they are not reported (except for Gemma and InternVL 3). PLM-8B outperforms all other models tested, emphasizing that \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) can be used to drive strong results across a wide range of tasks." + }, + { + "type": "table", + "bbox": [ + 0.112, + 0.763, + 0.891, + 0.884 + ], + "angle": 0, + "content": "
ModelEncoderOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QA Acc. [165]Doc.VQA Acc. (test) [91]Info. QA Acc. (test) [92]Avg. VQA Text.VQA Acc. [125]OK-VQA Acc. [118]POPE Acc. [73]VQAV2 Acc. (val) [40]Avg. Cap. Flicker CIDEr [157]COCO CIDEr [76] No Cap CIDEr [1]Avg. Video Video.MME Acc. [38]STAR ACC. [148]TGIF-QA Acc. [53]EgoScheme (test) Acc. [89]MV.Bench Acc. [68]PerceptionTest Acc. (test) [105]
LLaVA-OV 7B [66]SigLIP-so400M81.480.086.768.890.179.977.369.689.283.579.555.770.7112.163.857.766.077.265.257.158.1
Gemma3 12B [132]SigLIP-so400M-75.787.164.9--67.7--71.6----------54.9
Qwen2 VL 7B [144]DFN-H86.683.694.576.591.780.983.667.988.383.893.779.9102.598.767.762.967.381.865.461.666.9
InternVL 2.5 8B [18]InternViT 2.5-300M87.084.693.077.692.879.979.369.290.680.6113.096.5125.8116.772.960.677.691.366.272.668.9
InternVL 3 8B [168]InternViT 2.5-300M87.286.692.776.892.6-80.2-91.1------66.3---75.4-
PLM-8BPElangG88.485.594.680.992.782.986.569.689.985.6127.4105.6146.7129.977.958.384.995.568.877.182.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.886, + 0.888, + 0.917 + ], + "angle": 0, + "content": "Table 12 MLLM System-Level Comparison. We show a system-level comparison between PLM-8B based on \\(\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}\\) and popular open-access models of similar LLM scale using existing encoders. We report test set results where specified." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.51, + 0.95 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.537, + 0.101 + ], + "angle": 0, + "content": "5 Perception Encoder: Spatial Alignment" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.113, + 0.889, + 0.236 + ], + "angle": 0, + "content": "While language alignment with a pretrained LLM decoder is well-established, the best way to spatially align a model is not obvious. As shown in §3, \\(\\mathrm{PE}_{\\mathrm{core}}\\) already has features that perform well for spatial tasks. However, the layer that performs the best for higher level spatial tasks like detection or depth estimation (layer \\(\\sim 40\\)) is vastly different than the layer that performs the best for a pure spatial task like tracking (layer \\(\\sim 30\\)). While we were able to ignore this disparity during language alignment by aligning to an LLM decoder that could do all tasks, classical spatial tasks have decoders that come in all shapes and sizes. It would be impractical to simply align the model using all downstream decoders mirroring language alignment. Thus, we must first answer the question, what is happening in the features at those layers to make them useful for spatial tasks?" 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.254, + 0.339, + 0.27 + ], + "angle": 0, + "content": "5.1 Core Feature Analysis" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.278, + 0.344, + 0.446 + ], + "angle": 0, + "content": "We begin by analyzing the spatial properties of the features for \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) in the range of layers where it performed optimally for zero-shot tracking in §3. In Fig. 14, we plot (1) the pairwise feature cosine similarity between the pink token and all others, (2) the head average attention map for that token, and (3) the full attention matrix \\((HW\\times HW)\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.456, + 0.344, + 0.516 + ], + "angle": 0, + "content": "An 18 Layer Decoder. Remarkably, the cause for the tracking performance peak at layer 32 is abundantly clear from observing" + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.258, + 0.888, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.363, + 0.467, + 0.886, + 0.51 + ], + "angle": 0, + "content": "Figure 14 \\(\\mathsf{PE}_{\\mathrm{core}}\\mathsf{G}\\) Feature Analysis. To understand the dichotomy between optimal \\(\\mathsf{PE}_{\\mathrm{core}}\\) features for spatial tasks observed in Fig. 8, we analyze the spatial properties of the features between layers 30 and 34." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.516, + 0.888, + 0.577 + ], + "angle": 0, + "content": "the visualizations. Up until layer 32, the attention maps remain local. However, that changes abruptly at layer 33, at which point several tokens in the background of the image become \"global\" tokens. As shown by the vertical lines in the full attention matrix, starting from layer 33 every token attends to them. Thus, every layer 33 and up become part of a decoder for global information." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.584, + 0.888, + 0.677 + ], + "angle": 0, + "content": "This is not a new phenomenon. Recent work [23] shows this happening in all modern vision transformers above L scale. But notably these \"global tokens\" are not necessarily harmful. Given the optimal layer for most tasks in Fig. 8 lies within the global token region, the information they aggregate is useful downstream. However, tracking in §3 is zero-shot and relies purely on spatial correspondences, meaning it cannot make use of the global tokens. This explains why tracking peaks right before their introduction, while tasks that rely on semantic understanding or have larger decoders that can benefit from them do well with the later layers." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.694, + 0.377, + 0.71 + ], + "angle": 0, + "content": "5.2 Spatial Alignment Method" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.718, + 0.888, + 0.81 + ], + "angle": 0, + "content": "Given the analysis in §5.1, we have two objectives in creating a spatial alignment method: (1) we must preserve the optimal semantic information of the model (including the global tokens) that peaks around layer 40, and (2) we must do so while emphasizing local alignment in service of spatial tasks with shallow decoders. The first can be easily achieved by aligning with the model's own features (e.g., with MaskFeat [147]), but the second is more challenging. To accomplish this, we employ the Segment Anything Model (SAM) 2.1 [111] in a novel way to enforce spatial correspondence information in PE." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.821, + 0.888, + 0.898 + ], + "angle": 0, + "content": "Retaining Semantics. To retain the strong semantic features from \\(\\mathrm{PE}_{\\mathrm{core}}\\), we finetune the model with itself as a teacher. 
Specifically, we train the model to minimize the cosine similarity between its last layer and the frozen layer 41 features of its initialization (a layer around the peak for many tasks in Fig. 8). On its own this would be a tautology, so we apply heavy regularization to the student: DropPath [50] and LayerScale [135] similar to language alignment, as well as performing MaskFeat [147] with \\(75\\%\\) masking. We keep the teacher" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.937, + 0.51, + 0.95 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.113 + ], + "angle": 0, + "content": "fixed in contrast to other state-of-the-art spatial models, which all employ an EMA teacher [98, 138]. This could potentially help, but we opt for simplicity." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.124, + 0.491, + 0.305 + ], + "angle": 0, + "content": "Encouraging Locality. While we could \"retain\" locality by self-distilling from layer 32 features, that may be less effective as we are already distilling another layer of the model. Instead, we turn to a model that is explicitly tuned for locality: SAM [58, 111]. Notably, several works [110, 116, 119] have shown SAM to not be an effective teacher when distilling from multiple sources (though recently [45] has shown it can help with some tricks). However, upon observation of the raw features of SAM 2.1-L (Fig. 15), the main problem may be the same one we are currently trying to solve: SAM has global tokens as well! In this case," + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.115, + 0.888, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.257, + 0.886, + 0.3 + ], + "angle": 0, + "content": "Figure 15 SAM 2.1 Feature Similarity. The cosine similarity between the pink marked token and all others for SAM 2.1-L [111] features vs. our proposed mask logit features." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.305, + 0.804, + 0.32 + ], + "angle": 0, + "content": "they appear as dark spots in a grid-like arrangement across all examples in Fig. 15 raw features." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.327, + 0.888, + 0.449 + ], + "angle": 0, + "content": "Using the features of a model that itself has global tokens to mitigate the effect of global tokens is dubious at best. But, we don't have to use SAM's features to learn locality. At its core, SAM is a model that transforms points into spatially contiguous masks of select object. If what we want is smooth, locally consistent features, we can use the mask predictions themselves. Specifically, we query SAM 2.1-L with 1024 points arranged in a \\(32 \\times 32\\) grid. For each point, SAM returns a \\(H \\times W\\) mask logit the size of the image, which it normally would threshold and NMS. However, we instead concatenate those logits into a \\(H \\times W \\times 1024\\) tensor and use that as the feature map for alignment. This explicitly produces locally well-aligned features compared to the underlying feature space and has no spatial artifacts caused by global tokens, as shown in Fig. 15." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.456, + 0.888, + 0.547 + ], + "angle": 0, + "content": "Then to align, we distill the spatial correspondences between tokens by computing their pairwise cosine similarity for both the student and the teacher (creating a \\( HW \\times HW \\) matrix for each) and aligning them with MSE loss. Unlike SAM's underlying feature space (which [45] shows may be brittle to interpolation), the mask logit features are robust to interpolation, so we simply interpolate them down and train at the \\( \\mathrm{PE}_{\\mathrm{core}} \\) model's original 448px resolution. Finally, like for self-distillation we add the same masking and regularization. 
For both teachers, we apply loss to all tokens and add no extra parameters other than LayerScale." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.558, + 0.487, + 0.68 + ], + "angle": 0, + "content": "Effects. Again, the goal of alignment is to lift the strong features already learned by the core model as shown in §3. Thus, like we did for language alignment in §4.1, we perform layerwise frozen feature analysis on spatial tasks in Fig. 16. This time, we evaluate the original \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) checkpoint as well \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) aligned to its own layer 41, to SAM 2.1 mask logits, and finally both. We denote aligning to both as \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.687, + 0.49, + 0.898 + ], + "angle": 0, + "content": "Aligning purely based on the original model's layer 41 features performs well on detection, depth, and semantic segmentation, but falls short for zero-shot tracking, where precise locality is necessary to define boundaries between objects. In contrast, aligning to SAM 2.1 mask logits lowers last layer performance on every task except for tracking, where it significantly improves performance. Understandably, this is because the mask logits have little semantics (see Fig. 17). Thus, the optimal approach is to combine both teachers. As a result, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\) not only lifts the features for all tasks to the end of the network, but it also improves over self-alignment alone. Notably, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\) s tracking performance is lower than" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.898, + 0.882, + 0.913 + ], + "angle": 0, + "content": "the SAM-aligned model, but it is still ahead of other methods while being a generally good model, see §5.3." 
+ }, + { + "type": "image", + "bbox": [ + 0.515, + 0.554, + 0.695, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.554, + 0.877, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.677, + 0.695, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.677, + 0.877, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.51, + 0.833, + 0.887, + 0.889 + ], + "angle": 0, + "content": "Figure 16 Spatial Alignment. We analyze how our two spatial alignment methods individually change the internal features of \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\). Then we combine both alignment methods to create \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\) (see Appendix B.3.1)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.082, + 0.474, + 0.309 + ], + "angle": 0, + "content": "Last Layer Feature Visualization. In Fig. 17, we visualize the last layer features for the \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) and the 3 aligned models, with similar colors denoting similar features. In the first column, we see why the last layer performance of \\(\\mathrm{PE}_{\\mathrm{core}}\\) is so poor: while the last layer features contain information about the salient objects, they seem to have lost spatial coherence. Aligning to the model's own layer 41 features fixes this, but its spatial quality is lacking. In contrast, the model aligned to SAM 2.1 mask logits has locally clear features, but without semantics (similar objects have dissimilar features, see row 1 cats and row 2 cows). \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) using both teachers at once, retains the semantics of \\(\\mathrm{PE}_{\\mathrm{core}}\\) while producing high quality spatial features." 
+ }, + { + "type": "image", + "bbox": [ + 0.504, + 0.07, + 0.875, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.494, + 0.271, + 0.886, + 0.314 + ], + "angle": 0, + "content": "Figure 17 Last Layer Visualization for the models in Fig. 16 using 3 dimensional PCA to map features to LCh color space (see Appendix B.3.2). More examples in Appendix C.5." + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.332, + 0.532, + 0.459 + ], + "angle": 0, + "content": "
EncoderParamsResolutionTrackingSegmentationDepth
DAVIS (↑) [104]ADE20k (↑) [167]NYU (↓) [123]
BestLastIdxBestLastIdxBestLastIdx
OAI CLIP-L [106]0.3B224/1439.437.117/2439.438.319/24.366.39719/24
AIMv2-3B [37]2.7B448/1454.729.313/2441.631.920/24.311.32616/24
SigLIP-so [160]0.4B384/1448.736.316/2740.138.322/27.339.36921/27
SigLIP2-so [138]0.4B512/1651.445.315/2744.042.924/27.306.32925/27
SigLIP2-g-opt [138]1.1B384/1643.538.832/4042.141.334/40.302.32434/40
DINOv2-L [98]0.3B448/1458.758.223/2447.347.324/24.297.30823/24
DINOv2-g [98]1.1B448/1458.558.540/4048.748.437/40.279.29027/40
PEcoreG1.9B448/1456.842.832/5041.538.644/50.249.30939/50
PEspatialG1.9B448/1461.561.550/5049.348.949/50.262.27546/50
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.463, + 0.541, + 0.518 + ], + "angle": 0, + "content": "Table 13 Frozen Feature Dense Prediction including zero-shot tracking, semantic segmentation and depth estimation. We report best and last layer performance, along with which layer was best for each model. See Appendix B.3.3 for experimental settings." + }, + { + "type": "table", + "bbox": [ + 0.552, + 0.332, + 0.88, + 0.469 + ], + "angle": 0, + "content": "
EncoderParamsPretrain ResolutionLVIS [41]COCO [76]
APboxAPmaskAPboxAPmask
OAI CLIP-L [106]0.3B224/1445.041.954.047.5
MetaCLIP-G [152]1.8B224/1445.141.953.246.7
SigLIP-so [160]0.4B224/1445.041.954.447.6
MAE-L [44]0.3B224/1446.143.955.649.3
EVA02-L [35]0.3B224/1449.345.254.948.2
SigLIP2-so [138]0.4B512/1649.345.656.049.4
SigLIP2-g-opt [138]1.1B384/1652.948.557.150.2
DINOv2-L [98]0.3B518/1446.743.555.749.0
DINOv2-g [98]1.1B518/1451.547.357.250.0
PEcoreG1.9B448/1451.947.957.049.8
PEspatialG1.9B448/1454.249.357.850.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.551, + 0.474, + 0.882, + 0.515 + ], + "angle": 0, + "content": "Table 14 End-to-End Finetuning Detection and Segmentation using Mask R-CNN [43] and VitDet [72] in a controlled setting. Details in Appendix B.3.4." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.54, + 0.513, + 0.557 + ], + "angle": 0, + "content": "5.3 Comparisons with Existing Vision Encoders" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.569, + 0.888, + 0.677 + ], + "angle": 0, + "content": "Frozen Feature Dense Prediction. In Tab. 13, we compare different vision encoder's frozen features on three dense prediction tasks: DAVIS tracking [104] (J&F) following the training-free setting from [52, 107], ADE20k semantic segmentation [167] (mIoU) linear probing, and NYU depth estimation [123] (RMSE) with a DPT head [109]. For each model, we report both its best layer and last layer performance. Across the board, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) performs outperforms other state-of-the-art spatial models, with its best features being much better aligned to the last layer than the \\(\\mathrm{PE}_{\\mathrm{core}}\\) it started from. Notably, SigLIP2, which during pretraining combines spatial, captioning, and contrastive losses [138] is not aligned well to the last layer in comparison." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.687, + 0.888, + 0.794 + ], + "angle": 0, + "content": "End-to-End Finetuning Detection and Segmentation. In Tab. 14, we compare \\(\\mathrm{PE}_{\\mathrm{core}}\\) and \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) with other popular vision encoders in the standard full-finetuning ViTDet [72] Mask-RCNN [43] setting using COCO [76] and LVIS [41] as benchmarks. In this controlled experiment, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) is state-of-the-art among various vision backbones. 
This is significant, as contrastive encoders (especially large ones like MetaCLIP-G [152]) usually perform very poorly on detection, with smaller models often performing better. Typically, encoders only scale for detection if using spatial pretraining or a significant amount of detection data [98] is used to align them directly to downstream tasks. In contrast, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) uses no detection data for alignment, making it general." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.804, + 0.551, + 0.911 + ], + "angle": 0, + "content": "System-Level Detection. In Tab. 15, we provide a system-level end-to-end finetuning comparison vs. the absolute state-of-the-art in COCO detection. With only Object365 [120] as extra detection data, \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) can match the performance of more complex models tuned for detection, while only using a simple DETR-style decoder [12, 99]. \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) marks the first general, contrastively pretrained model to accomplish this." + }, + { + "type": "table", + "bbox": [ + 0.582, + 0.802, + 0.875, + 0.879 + ], + "angle": 0, + "content": "
EncoderParamsDetectorCOCO APbox
SwinV2-G [80]3.0BHTC++ [14]62.5
Swin-L [79]0.3BDINO [161]63.2
EVA02-L [35]0.3BCascade [11]64.1
InternImage-G [145]3.0BDINO [161]65.3
EVA02-L [35]0.3BCoDETR [169]65.9
PEspatialG1.9BDETA [99]66.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.572, + 0.882, + 0.887, + 0.924 + ], + "angle": 0, + "content": "Table 15 System-Level Comparison on Detection. Comparing to the leading results on COCO [76] val2017. See Appendix B.3.5 for training recipe." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.082, + 0.284, + 0.098 + ], + "angle": 0, + "content": "6 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.114, + 0.884, + 0.19 + ], + "angle": 0, + "content": "Learning vision-semantic representations has long been the leading approach for developing foundational models in perception. By aligning visual and textual representations, these models excel not only in vision tasks such as zero-shot image classification and image-text retrieval [51, 106, 117], open-vocabulary detection [63, 94, 95] and segmentation [22, 28], but also serve as the basis for multi-modal large language models (MLLMs) [3, 5, 78, 93, 101, 134]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.201, + 0.886, + 0.384 + ], + "angle": 0, + "content": "Contrastive Language-Image Pretraining. The early works of Virtex [27], ICMLM [115], and ConViRT [163] developed the techniques for learning through contrastive objectives between vision and language modalities. Subsequently, vision encoders such as CLIP [51, 106] and ALIGN [54] scaled these techniques to much larger datasets and model sizes, popularizing vision-language contrastive learning. A series of open-weight contrastive models have been developed to enhance the performance and robustness of CLIP [33, 71, 117, 129, 152, 160]. For instance, SigLIP [160] replaces the traditional softmax with a sigmoid function in contrastive learning, while FLIP [74] employs masking techniques to expedite the training process. 
We are among this effort and build a state-of-the-art open Perception Encoder (PE) (§2.1). Other objectives that have proven useful for building visual encoders include captioning loss, which learns to predict image descriptions using a language model decoder and transfers well to downstream multi-modal language modeling tasks [37, 137]. Many works are now attempting to combine two or more objectives to address different downstream tasks through pretraining with multiple objectives [37, 158] or training sequentially [19, 66]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.395, + 0.884, + 0.515 + ], + "angle": 0, + "content": "Efficient Training. Various axes of efficient training of clip models have been explored. BASIC [102] and LAION [117] explored scaling the batch size up to 160K, and shows the benefits of large batch sizes during training. EVA-CLIP [130] uses LAMB optimizer [156] for large batch training of clip models. Rotary positional embedding (RoPE) [127] has been successfully adopted in large language models. In vision transformers [2, 48] adopted 2D rotatory positional embeddings. For data engine, a series of works focus on large-scale sourcing and filtering through efficient data curation [33, 39, 117, 152] and explore recaptioning training images using MLLMs or VLMs [32, 64, 96, 151]. We extend these concepts to build a video data engine and scale our model to function as one strong model for both image and video (§2.2)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.528, + 0.886, + 0.755 + ], + "angle": 0, + "content": "Best Embedding Layer Inside the Network. Typically, most vision encoders rely on the last layer to extract features for the task it is trained on. However, when trained on proxy or self-supervised tasks, the last layer is often not the ideal candidate for other tasks [8, 15, 16, 30, 85, 107, 121, 128, 142, 159, 166]. 
For example, when using image colorization as pretraining objective, [162, 166] showed that the middle layers were better at image classification compared to last layers. Subsequently, in iGPT [15], when trained for next token prediction, intermediate layers performed better at image classification. AIMv1 [30] also showed similar behavior for image based next token prediction with patch normalized MSE loss. Toto [107] showed this can be extended for next token prediction in videos, and intermediate layers are best for image classification, video classification, tracking and robotics. REPA [159] showed this behavior for image generation models, where the intermediate layers of SiT [85] has better linear probing accuracy compared to earlier or later layers. In CLIP models, CLIPer [128] identified that early layers in CLIP possess good spatial understanding. In contrast to these lines of work, in this paper, we first show this behavior is not limited to one class of encoders. Specifically, we show this behavior exists in a spatially self-supervised model [98], generative captioning model [37], and also in our own PE. Then we study this behavior for PE encoder in depth, and show it is possible for CLIP training to produce rich spatial and semantic features in intermediate layers (§3)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.766, + 0.884, + 0.903 + ], + "angle": 0, + "content": "Alignment Tuning. We explore alignment tuning for language (§4) and for spatial understanding (§5). For language alignment, we focus on adapting to multimodal large language models (MLLMs); for spatial alignment, we employ self-distillation of the models own features combined with a teacher for locality. In MLLM literature, midtraining—i.e., a middle stage of training used to exploit large-scale multimodal data—has been actively studied. LLaVA-OneVision [66], InternVL series [18, 19], QwenVL series [3, 144], and several other leading MLLMs [82, 132] adopt this paradigm. 
Our \\(\\mathrm{PE}_{\\mathrm{lang}}\\) can be seen as a variant of midtraining, but with one critical difference in principle: our goal is not to build the best MLLM, but to make the vision encoder the most general. Throughout §4, we benchmark our \\(\\mathrm{PE}_{\\mathrm{lang}}\\) across different language models, input resolution, on various tasks for image and video to show this generality. For spatial tasks, we utilize the hidden embeddings" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.081, + 0.888, + 0.175 + ], + "angle": 0, + "content": "in the intermediate layers. Recently, several works showed the effectiveness of distilling teacher model via representation alignment with cosine similarity. REPA [159] distilled an early layer features of DINO for image diffusion models, RADIO [110] used multi-teacher distillation (DINO, CLIP and SAM). The key idea is to borrow semantic understanding (e.g., CLIP) and spatial understanding (e.g., SAM, DINO) of a pretrained vision encoders. In our \\(\\mathrm{PE}_{\\mathrm{spatial}}\\), we exploit the intermediate features of \\(\\mathrm{PE}_{\\mathrm{core}}\\) for semantics, and a novel way to use SAM for spatial understanding." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.195, + 0.262, + 0.213 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.228, + 0.889, + 0.353 + ], + "angle": 0, + "content": "We have presented Perception Encoders (PE), a family of best-in-class foundation models comprising \\(\\mathrm{PE}_{\\mathrm{core}}\\), \\(\\mathrm{PE}_{\\mathrm{lang}}\\), and \\(\\mathrm{PE}_{\\mathrm{spatial}}\\). 
We have shown that \\(\\mathrm{PE}_{\\mathrm{core}}\\) can outperform models trained with WebLI and JFT-3B, which were previously the undisputed leaders in zero-shot image recognition, while also excelling in zero-shot video recognition. We have demonstrated that \\(\\mathrm{PE}_{\\mathrm{lang}}\\) can be used to build a multimodal language model [21] that is at the forefront of the field in terms of performance. We have established that \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) can match the long-standing state-of-the-art in object detection with a significantly simpler decoder. Throughout all of this, one conclusion is abundantly clear: Perception Encoder unlocks the potential to scale simple contrastive vision-language pretraining to address a wide range of downstream vision tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.361, + 0.889, + 0.485 + ], + "angle": 0, + "content": "Additional Contributors and Acknowledgments. We would like to thank Abhimanyu Dubey, Adel Ahmadyan, Andrew Westbury, Arkabandhu Chowdhury, Azita Shokrpour, Babak Damavandi, Chay Ryali, Cyprien de Lichy, Didac Suris Coll-Vinent, Dong Wang, Filip Radenovic, George Orlin, Han Zou, Harry Tran, Jitendra Malik, Joelle Pineau, Joseph Greer, Kavya Srinet, Kirmani Ahmed, Laura Gustafson, Lu Zhang, Muhammad Maaz, Natalia Neverova, Nicolas Carion, Oleksandr Maksymets, Ramya Raghavendra, Romy Luo, Ronghang Hu, Sam Doud, Sasha Mitts, Sean Bell, Shane Moon, Shuming Hu, Soerian Lieve, Stephane Kasriel, Valentin Gabeur, Vanessa Stark, Vignesh Ramanathan, Vivian Lee, Xuan Hu, Yang Li, and Ziyang Wang for their contributions and support for the project. And we thank you, the reader, for reading this far." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.334, + 0.101 + ], + "angle": 0, + "content": "A Video Data Engine" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.116, + 0.276, + 0.133 + ], + "angle": 0, + "content": "A.1 Video Caption" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.145, + 0.31, + 0.16 + ], + "angle": 0, + "content": "LLM Summarization prompt" + }, + { + "type": "title", + "bbox": [ + 0.119, + 0.171, + 0.348, + 0.183 + ], + "angle": 0, + "content": "LLM Summarization prompt 72 tokens" + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.186, + 0.767, + 0.198 + ], + "angle": 0, + "content": "Create a concise caption of a video using the provided metadata, video caption, and frame captions." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.199, + 0.876, + 0.221 + ], + "angle": 0, + "content": "TASK: Extract key information from the captions and combine it into an alt text format using single phrase or set of phrases that includes all relevant details." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.222, + 0.228, + 0.234 + ], + "angle": 0, + "content": "Steps to Follow:" + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.234, + 0.876, + 0.258 + ], + "angle": 0, + "content": "1. Review the metadata (title and description) for general context, you can rely it for entity names but do not rely on it as the primary source of information for your caption." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.259, + 0.706, + 0.27 + ], + "angle": 0, + "content": "2 . Blend title / description with video caption and frame captions for the main storyline" + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.271, + 0.47, + 0.281 + ], + "angle": 0, + "content": "3. Extract the most relevant and concise information." 
+ }, + { + "type": "text", + "bbox": [ + 0.121, + 0.282, + 0.876, + 0.305 + ], + "angle": 0, + "content": "4. Combine extracted information into a alt text format using short phrase or set of phrases with approximately 120 tokens, considering special characters like comma as part of the token count." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.306, + 0.639, + 0.317 + ], + "angle": 0, + "content": "5. Prioritize including all key information over sentence structure or grammar." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.318, + 0.588, + 0.328 + ], + "angle": 0, + "content": "6. Minimize the use of special characters and focus of key information." + }, + { + "type": "list", + "bbox": [ + 0.121, + 0.234, + 0.876, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.33, + 0.216, + 0.34 + ], + "angle": 0, + "content": "What to Avoid:" + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.342, + 0.706, + 0.353 + ], + "angle": 0, + "content": "- Avoid adding or inferring information not present in the original metadata and captions." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.354, + 0.589, + 0.365 + ], + "angle": 0, + "content": "- Avoid using complex sentence structures or prioritizing sentence flow." + }, + { + "type": "list", + "bbox": [ + 0.121, + 0.342, + 0.706, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.367, + 0.742, + 0.377 + ], + "angle": 0, + "content": "Create a concise caption of the video based on the metadata, video caption, and frame captions." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.399, + 0.367, + 0.414 + ], + "angle": 0, + "content": "A.2 PE Video Dataset Details" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.423, + 0.886, + 0.468 + ], + "angle": 0, + "content": "PE Video is a dataset that we collected and curated from a licensed data source. The videos are high-resolution and high-quality with a focus on motion. 
The total number of videos is 1M. Among these, 120K videos have human-refined video captions, and we selected 15K from the 120K videos as a benchmark." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.486, + 0.36, + 0.501 + ], + "angle": 0, + "content": "A.2.1 Video Data Filtering Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.509, + 0.886, + 0.556 + ], + "angle": 0, + "content": "The goal of video data filtering is to identify videos that contain motions such as object motion, camera motion, interaction between objects, human actions, sequences of actions, and manipulation of objects, while rejecting videos with static scenes, like landscapes, or those that are artificial or highly edited." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.562, + 0.722, + 0.578 + ], + "angle": 0, + "content": "To achieve this, we created a video filtering pipeline consisting of the following steps:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.589, + 0.886, + 0.635 + ], + "angle": 0, + "content": "Step1: Compute motion features. For each video, we compute a list of features from video frames, including frames per second (fps), number of frames, number of I-frames, motion vector magnitude, and motion vector variance, using off-the-shelf tools like OpenCV [10]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.647, + 0.885, + 0.678 + ], + "angle": 0, + "content": "Step 2: Extract video frame features. For each video, we uniformly sample three frames and encode them using a DINOv2 model [98] and a SigLIP model [160]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.688, + 0.886, + 0.735 + ], + "angle": 0, + "content": "Step 3: LLM Features. For each video, we also run a multimodal large language model (LLM) like LlamaOnevision QwenLM 2 0.5B [66] to extract MLLM features. We composed a list of 26 questions and performed MLLM inference on the videos. The questions can be found here in §A.2.2." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.746, + 0.886, + 0.838 + ], + "angle": 0, + "content": "Step 4: Video Quality Scoring. We combine all the features collected so far and use a random forest model to predict a score between 0 and 5. To train the model, we manually annotated approximately 1,000 videos with scores between 0 and 5. A low score indicates that the video is almost static and can be nearly summarized by a single frame, while a high score indicates that there are multiple temporal events in the video, requiring several frames to accurately caption it. We use these annotated videos as training data to fit a random forest model for video quality score prediction." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.848, + 0.886, + 0.88 + ], + "angle": 0, + "content": "Step 5: We apply k-means clustering to the videos and rank them within each cluster. By selecting the top-ranked videos from each cluster, we effectively reduce the number of duplicated videos in the final dataset." + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.589, + 0.886, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.082, + 0.331, + 0.095 + ], + "angle": 0, + "content": "A.2.2 LLM Feature Extraction" + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.109, + 0.36, + 0.12 + ], + "angle": 0, + "content": "LLM Feature extraction question list" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.124, + 0.504, + 0.136 + ], + "angle": 0, + "content": "Is the camera capturing the scene static? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.137, + 0.503, + 0.147 + ], + "angle": 0, + "content": "Is the camera capturing the scene moving? Reply yes or no." 
+ }, + { + "type": "text", + "bbox": [ + 0.121, + 0.148, + 0.464, + 0.159 + ], + "angle": 0, + "content": "Is the video capturing a landscape? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.16, + 0.483, + 0.171 + ], + "angle": 0, + "content": "Is the video capturing a static scene? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.172, + 0.482, + 0.183 + ], + "angle": 0, + "content": "Is the scene captured from a distance? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.184, + 0.463, + 0.195 + ], + "angle": 0, + "content": "Is the video captured with a drone? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.196, + 0.444, + 0.207 + ], + "angle": 0, + "content": "Is the video computer-generated? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.208, + 0.43, + 0.219 + ], + "angle": 0, + "content": "Is the video content abstract? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.22, + 0.523, + 0.231 + ], + "angle": 0, + "content": "Is there something moving through the scene? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.232, + 0.535, + 0.243 + ], + "angle": 0, + "content": "Is there someone doing something in the video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.244, + 0.528, + 0.255 + ], + "angle": 0, + "content": "Are there several things moving in the video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.256, + 0.528, + 0.267 + ], + "angle": 0, + "content": "Is there an object that is being manipulated? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.268, + 0.437, + 0.279 + ], + "angle": 0, + "content": "Are there animals in the video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.28, + 0.411, + 0.291 + ], + "angle": 0, + "content": "Is the scene mostly static? Reply yes or no." 
+ }, + { + "type": "text", + "bbox": [ + 0.122, + 0.292, + 0.534, + 0.303 + ], + "angle": 0, + "content": "Are things occluding each other in this video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.304, + 0.658, + 0.315 + ], + "angle": 0, + "content": "Is there something obstructing the view apart from the watermark? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.316, + 0.541, + 0.327 + ], + "angle": 0, + "content": "Is there a large number of things in the video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.328, + 0.58, + 0.339 + ], + "angle": 0, + "content": "Are there more than 5 different objects in the video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.34, + 0.717, + 0.35 + ], + "angle": 0, + "content": "Is it hard to keep track of some entities because they are moving so much? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.352, + 0.632, + 0.362 + ], + "angle": 0, + "content": "Is someone looking at a phone, a tablet or a computer screen? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.364, + 0.769, + 0.374 + ], + "angle": 0, + "content": "Are they looking at a phone, a tablet or a computer screen during the whole video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.376, + 0.541, + 0.387 + ], + "angle": 0, + "content": "Are there several moving persons in this video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.388, + 0.541, + 0.399 + ], + "angle": 0, + "content": "Are there several moving animals in this video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.4, + 0.495, + 0.41 + ], + "angle": 0, + "content": "Are there several objects in this video? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.412, + 0.593, + 0.422 + ], + "angle": 0, + "content": "Are there several similar-looking objects in the video? Reply yes or no." 
+ }, + { + "type": "text", + "bbox": [ + 0.122, + 0.423, + 0.372, + 0.434 + ], + "angle": 0, + "content": "Do they look similar? Reply yes or no." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.444, + 0.886, + 0.505 + ], + "angle": 0, + "content": "We use LLaVA-OneVision [78] model to extract LLM features from the videos. For each video, we prompt with 26 different questions to extract features ranging from, \"is the video a landscape video?\" to, \"are there any moving objects in the video?\" The features are then used by a random forest model to determine the video quality score." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.523, + 0.365, + 0.536 + ], + "angle": 0, + "content": "A.2.3 PVD Benchmark Distribution" + }, + { + "type": "table", + "bbox": [ + 0.377, + 0.552, + 0.622, + 0.673 + ], + "angle": 0, + "content": "
CategoryNumber of videosAvg. Caption Length
Hand Actions214354.2
Object Interactions186442.6
Food Preparation169156.8
Work Activities168947.8
Outdoor Scenes155850.7
Animals142350.9
Water Scenes133744.6
Object Handling130751.6
Close-up Shots112245.1
Nature Scenes86638.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.676, + 0.886, + 0.706 + ], + "angle": 0, + "content": "Table 16 PVD Benchmark Statistics. We created a dataset of 15K videos together with human-verified captions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.151, + 0.495, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.253, + 0.219, + 0.36, + 0.228 + ], + "angle": 0, + "content": "Category: Hand Actions" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.23, + 0.484, + 0.252 + ], + "angle": 0, + "content": "Caption: The video captures a closeup shot of person typing on a keyboard. The camera moves from the left side of the keyboard to the right, an animation of the revolving globe and some numbers can be seen in the frame and the video ends." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.153, + 0.881, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.223, + 0.757, + 0.232 + ], + "angle": 0, + "content": "Category: Object Interactions" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.233, + 0.86, + 0.248 + ], + "angle": 0, + "content": "Caption: The video shows a black and white spiral that is spinning. The spiral is made up of alternating black and white stripes that are evenly spaced and symmetrical." 
+ }, + { + "type": "image", + "bbox": [ + 0.117, + 0.279, + 0.495, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.344, + 0.368, + 0.352 + ], + "angle": 0, + "content": "Category: Food Preparation" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.354, + 0.489, + 0.383 + ], + "angle": 0, + "content": "Caption: The video shows a person cutting an green color item into small pieces. They are using a knife to slice the pickle into thin pieces, and then chopping those pieces into smaller cubes. The person is working on a wooden cutting board, and the Hands are visible from the left side of the frame with pink nail paint on their nails." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.279, + 0.881, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.636, + 0.35, + 0.748, + 0.359 + ], + "angle": 0, + "content": "Category: Work Activities" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.36, + 0.868, + 0.376 + ], + "angle": 0, + "content": "Caption: The video shows a person using a shovel to clean the ashes from a fireplace. They are scooping up the ashes and removing them from the fireplace." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.404, + 0.495, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.471, + 0.365, + 0.479 + ], + "angle": 0, + "content": "Category: Outdoor Scenes" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.481, + 0.484, + 0.511 + ], + "angle": 0, + "content": "Caption: The video shows a tall, pointed structure in the middle of a field. and the structure is surrounded by trees and other vegetation. The field is divided into sections, with some areas covered in green grass and others covered in white material. The video shows the structure and the field from a distance, with the camera moving around it." 
+ }, + { + "type": "image", + "bbox": [ + 0.503, + 0.407, + 0.881, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.651, + 0.474, + 0.733, + 0.483 + ], + "angle": 0, + "content": "Category: Animals" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.484, + 0.871, + 0.507 + ], + "angle": 0, + "content": "Caption: The video shows a white and gray adult cat and two kittens. The adult cat is grooming the kitten closest to it with its tongue, and the kitten is looking around. A hand reaches out from the frame's upper left to pet the two kittens." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.533, + 0.495, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.606, + 0.36, + 0.614 + ], + "angle": 0, + "content": "Category: Water Scenes" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.616, + 0.478, + 0.632 + ], + "angle": 0, + "content": "Caption: The video shows a large school of fish swimming in a water body towards the right frame. The camera too pans a little to the right." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.533, + 0.881, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.634, + 0.606, + 0.75, + 0.614 + ], + "angle": 0, + "content": "Category: Object Handling" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.616, + 0.868, + 0.632 + ], + "angle": 0, + "content": "Caption: The video shows a person putting a bowl of something into an oven. The person then closes the oven door. The background is blurry." 
+ }, + { + "type": "image", + "bbox": [ + 0.117, + 0.667, + 0.495, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.726, + 0.362, + 0.735 + ], + "angle": 0, + "content": "Category: Close-up Shots" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.737, + 0.489, + 0.766 + ], + "angle": 0, + "content": "Caption: The video shows a white counter with two brown buckets and a yellow bucket. Then a person's right hand wearing a green glove enters the frame from top right side and place a yellow flower near to yellow watering can. The person then places the flower, in front of the buckets and exits the frame. In the background is a brown wall, and the camera is static throughout the clip." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.663, + 0.881, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.637, + 0.73, + 0.747, + 0.738 + ], + "angle": 0, + "content": "Category: Nature Scenes" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.74, + 0.865, + 0.763 + ], + "angle": 0, + "content": "Caption: The video shows a pile of branches and leaves on fire in a field. The fire is burning brightly, with flames licking at the edges of the pile. The smoke from the fire rises into the air, billowing up into the sky." + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.778, + 0.885, + 0.806 + ], + "angle": 0, + "content": "Figure 18 More PE Video Dataset Examples. For each of the ten categories, we randomly pick one video and show its video caption. The captions were generated by our video data pipeline and then refined by human annotators." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.082, + 0.383, + 0.1 + ], + "angle": 0, + "content": "B Implementation Details" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.116, + 0.227, + 0.131 + ], + "angle": 0, + "content": "B.1 PE Core" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.142, + 0.888, + 0.159 + ], + "angle": 0, + "content": "We provide additional implementation details for building \\(\\mathrm{PE}_{\\mathrm{core}}\\). Our implementation is based on OpenCLIP5." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.175, + 0.388, + 0.191 + ], + "angle": 0, + "content": "B.1.1 Architecture and Training Setups" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.203, + 0.889, + 0.355 + ], + "angle": 0, + "content": "Model Architecture. Following CLIP, \\(\\mathrm{PE}_{\\mathrm{core}}\\) comprises a Transformer-based [141] vision and a text encoder. We employ customized Transformer configurations as detailed in Tab. 17. For pooling, we use an attention pooling block in the style of SigLIP [160] with 8 heads from the last-layer feature to construct image and video embeddings. Regarding positional embedding, we use 2D RoPE [127] for relative positional embeddings and 2D learnable absolute positional embeddings (abs) the same size as the model's input resolution. We interpolate positional embeddings to enable support for various resolutions beyond the default. The text context length is 72 for G-scale and 32 for B and L-scale models. Originally a bug, we find it optimal to not disable the class token when using attention pooling for smaller models. Thus, the B and L models use a class token, then the attention pooling layer probes all features at once (class token included). Finally, we use an input mean and standard deviation of \\((0.5,0.5,0.5)\\) for simplicity."
+ }, + { + "type": "table", + "bbox": [ + 0.191, + 0.366, + 0.807, + 0.453 + ], + "angle": 0, + "content": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP DimPoolingPositional EmbeddingResolution & Context LenPatch SizeClass Token Register
BVision0.09B768123072121024Attn PoolRoPE+Abs22416
Text0.31B102424409616EOS TokenAbs32--
LVision0.32B1024244096161024Attn PoolRoPE+Abs33614
Text0.31B102424409616EOS TokenAbs32--
GVision1.88B1536508960161280Attn PoolRoPE+Abs44814
Text0.47B128024512020EOS TokenAbs72--
" + }, + { + "type": "table_caption", + "bbox": [ + 0.328, + 0.456, + 0.667, + 0.469 + ], + "angle": 0, + "content": "Table 17 PE Model Configurations with full details." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.49, + 0.886, + 0.536 + ], + "angle": 0, + "content": "PE Core Training. As discussed in §2.4, the training of \\(\\mathrm{PE}_{\\mathrm{core}}\\) involves three stages: 1) image pretraining; 2) image and video finetuning; and 3) an additional model distillation for smaller models. These three stages work together to develop a robust and effective \\(\\mathrm{PE}_{\\mathrm{core}}\\) model." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.543, + 0.853, + 0.559 + ], + "angle": 0, + "content": "We first provide training recipes for 1) image pretraining in Tab. 18 and 2) video finetuning in Tab. 19." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.571, + 0.34, + 0.798 + ], + "angle": 0, + "content": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate2e-3
batch size131,072
warm-up steps2K
training steps443K (B, L) / 656K (G)
data quantity5.4B
samples seen58B (B, L) / 86B (G)
max logit scale100
mask reg ratio0.4
mask reg batch8192
progressive res112-160-224 (B)
98-154-224-336 (L)
98-154-224-336-448 (G)
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
" + }, + { + "type": "table", + "bbox": [ + 0.409, + 0.571, + 0.614, + 0.751 + ], + "angle": 0, + "content": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size4096
warm-up steps2K
training steps5.4K
data quantity22M
samples seen22M
max logit scale100
number of frames8
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
" + }, + { + "type": "table", + "bbox": [ + 0.695, + 0.571, + 0.84, + 0.724 + ], + "angle": 0, + "content": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size16384
warm-up steps2K
training steps269K
data quantity5.4B
samples seen4.4B
max logit scale100
teacher logit scale200 (§C.3)
data augNone
" + }, + { + "type": "table_caption", + "bbox": [ + 0.701, + 0.728, + 0.835, + 0.74 + ], + "angle": 0, + "content": "Table 20 Distillation." + }, + { + "type": "table_caption", + "bbox": [ + 0.425, + 0.755, + 0.595, + 0.768 + ], + "angle": 0, + "content": "Table 19 Video Finetuning." + }, + { + "type": "table_caption", + "bbox": [ + 0.153, + 0.802, + 0.328, + 0.815 + ], + "angle": 0, + "content": "Table 18 Image Pretraining." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.832, + 0.884, + 0.862 + ], + "angle": 0, + "content": "After training the largest G-scale model, we train the smaller models with image pretraining, then distill with image distillation in Tab. 20, then finally apply video finetuning at the end." + }, + { + "type": "page_footnote", + "bbox": [ + 0.129, + 0.87, + 0.419, + 0.884 + ], + "angle": 0, + "content": "5https://github.com/mlfoundations/open Clip" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.426, + 0.095 + ], + "angle": 0, + "content": "B.1.2 Zero-Shot Classification and Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.109, + 0.887, + 0.171 + ], + "angle": 0, + "content": "Zero-Shot Evaluation on Images and Videos. We use CLIPBench for zero-shot classification and retrieval benchmarking. The benchmark datasets and splits are obtained from the original dataset websites or HuggingFace. We extend the CLIPBench zero-shot evaluation to include video datasets such as MSR-VTT and Kinetics, and will release our model checkpoints, evaluation code, and scripts for reproducibility." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.182, + 0.888, + 0.228 + ], + "angle": 0, + "content": "Prompt Design. For zero-shot image-text and video-text retrieval, we rely solely on the original captions without any additional prompts. 
In contrast, for zero-shot classification, we utilize task-specific prompts graciously provided by the InternVL [19] authors. All additional prompts will be released." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.234, + 0.885, + 0.266 + ], + "angle": 0, + "content": "For example, we employ specific prompts for zero-shot image classification on various ImageNet benchmarks (e.g., ImageNet val, ImageNet v2) and video classification on Kinetics datasets (e.g., K400, K600, K700)." + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.274, + 0.445, + 0.286 + ], + "angle": 0, + "content": "Zero-Shot Image Classification Prompts - ImageNet" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.29, + 0.882, + 0.484 + ], + "angle": 0, + "content": "a bad photo of a \\(\\{\\mathbf{c}\\}\\). a photo of many \\(\\{\\mathbf{c}\\}\\). a sculpture of a \\(\\{\\mathbf{c}\\}\\). a photo of the hard to see \\(\\{\\mathbf{c}\\}\\). a low resolution photo of the \\(\\{\\mathbf{c}\\}\\). a rendering of a \\(\\{\\mathbf{c}\\}\\). graffiti of a \\(\\{\\mathbf{c}\\}\\). a bad photo of the \\(\\{\\mathbf{c}\\}\\). a cropped photo of the \\(\\{\\mathbf{c}\\}\\). a tattoo of a \\(\\{\\mathbf{c}\\}\\). the embroidered \\(\\{\\mathbf{c}\\}\\). a photo of a hard to see \\(\\{\\mathbf{c}\\}\\). a bright photo of a \\(\\{\\mathbf{c}\\}\\). a photo of a clean \\(\\{\\mathbf{c}\\}\\). a photo of a dirty \\(\\{\\mathbf{c}\\}\\). a dark photo of the \\(\\{\\mathbf{c}\\}\\). a drawing of a \\(\\{\\mathbf{c}\\}\\). a photo of my \\(\\{\\mathbf{c}\\}\\). the plastic \\(\\{\\mathbf{c}\\}\\). a photo of the cool \\(\\{\\mathbf{c}\\}\\). a close-up photo of a \\(\\{\\mathbf{c}\\}\\). a black and white photo of the \\(\\{\\mathbf{c}\\}\\). a painting of the \\(\\{\\mathbf{c}\\}\\). a painting of a \\(\\{\\mathbf{c}\\}\\). a pixelated photo of the \\(\\{\\mathbf{c}\\}\\). a sculpture of the \\(\\{\\mathbf{c}\\}\\). a bright photo of the \\(\\{\\mathbf{c}\\}\\). a cropped photo of a \\(\\{\\mathbf{c}\\}\\). 
a plastic \\(\\{\\mathbf{c}\\}\\). a photo of the dirty \\(\\{\\mathbf{c}\\}\\). aJPEG corrupted photo of a \\(\\{\\mathbf{c}\\}\\). a blurry photo of the \\(\\{\\mathbf{c}\\}\\). a photo of the \\(\\{\\mathbf{c}\\}\\). a good photo of the \\(\\{\\mathbf{c}\\}\\). a rendering of the \\(\\{\\mathbf{c}\\}\\). a \\(\\{\\mathbf{c}\\}\\) in a video game. a photo of one \\(\\{\\mathbf{c}\\}\\). a doodle of a \\(\\{\\mathbf{c}\\}\\). a close-up photo of the \\(\\{\\mathbf{c}\\}\\). a photo of a \\(\\{\\mathbf{c}\\}\\). the origami \\(\\{\\mathbf{c}\\}\\). the \\(\\{\\mathbf{c}\\}\\) in a video game. a sketch of a \\(\\{\\mathbf{c}\\}\\). a doodle of the \\(\\{\\mathbf{c}\\}\\). a origami \\(\\{\\mathbf{c}\\}\\). a low resolution photo of a \\(\\{\\mathbf{c}\\}\\). the toy \\(\\{\\mathbf{c}\\}\\). a rendition of the \\(\\{\\mathbf{c}\\}\\). a photo of the clean \\(\\{\\mathbf{c}\\}\\). a photo of a large \\(\\{\\mathbf{c}\\}\\). a rendition of a \\(\\{\\mathbf{c}\\}\\). a photo of a nice \\(\\{\\mathbf{c}\\}\\). a photo of a weird \\(\\{\\mathbf{c}\\}\\). a blurry photo of a \\(\\{\\mathbf{c}\\}\\). a cartoon \\(\\{\\mathbf{c}\\}\\). art of a \\(\\{\\mathbf{c}\\}\\). a sketch of the \\(\\{\\mathbf{c}\\}\\). a embroidered \\(\\{\\mathbf{c}\\}\\). a pixelated photo of a \\(\\{\\mathbf{c}\\}\\). itap of the \\(\\{\\mathbf{c}\\}\\). a JPEG corrupted photo of the \\(\\{\\mathbf{c}\\}\\). a good photo of a \\(\\{\\mathbf{c}\\}\\). a plushie \\(\\{\\mathbf{c}\\}\\). a photo of the nice \\(\\{\\mathbf{c}\\}\\). a photo of the small \\(\\{\\mathbf{c}\\}\\). a photo of the weird \\(\\{\\mathbf{c}\\}\\). the cartoon \\(\\{\\mathbf{c}\\}\\). art of the \\(\\{\\mathbf{c}\\}\\). a drawing of the \\(\\{\\mathbf{c}\\}\\). a photo of the large \\(\\{\\mathbf{c}\\}\\). a black and white photo of a \\(\\{\\mathbf{c}\\}\\). the plushie \\(\\{\\mathbf{c}\\}\\). a dark photo of a \\(\\{\\mathbf{c}\\}\\). itap of a \\(\\{\\mathbf{c}\\}\\). graffiti of the \\(\\{\\mathbf{c}\\}\\). 
a toy \\(\\{\\mathbf{c}\\}.\\) itap of my \\(\\{\\mathbf{c}\\}.\\) a photo of a cool \\(\\{\\mathbf{c}\\}.\\) a photo of a small \\(\\{\\mathbf{c}\\}.\\) a tattoo of the \\(\\{\\mathbf{c}\\}.\\)" + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.495, + 0.446, + 0.506 + ], + "angle": 0, + "content": "Zero-Shot Video Classification Prompts - Kinetics" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.51, + 0.88, + 0.607 + ], + "angle": 0, + "content": "a photo of \\(\\{\\mathbf{c}\\}\\). a photo of a person \\(\\{\\mathbf{c}\\}\\). a photo of a person using \\(\\{\\mathbf{c}\\}\\). a photo of a person doing \\(\\{\\mathbf{c}\\}\\). a photo of a person during \\(\\{\\mathbf{c}\\}\\). a photo of a person performing \\(\\{\\mathbf{c}\\}\\). a photo of a person practicing \\(\\{\\mathbf{c}\\}\\). a video of \\(\\{\\mathbf{c}\\}\\). a video of a person using \\(\\{\\mathbf{c}\\}\\). a video of a person doing \\(\\{\\mathbf{c}\\}\\). a video of a person during \\(\\{\\mathbf{c}\\}\\). a video of a person performing \\(\\{\\mathbf{c}\\}\\). a video of a person practicing \\(\\{\\mathbf{c}\\}\\). a example of \\(\\{\\mathbf{c}\\}\\). a example of a person \\(\\{\\mathbf{c}\\}\\). a example of a person using \\(\\{\\mathbf{c}\\}\\). a example of a person doing \\(\\{\\mathbf{c}\\}\\). a example of a person during \\(\\{\\mathbf{c}\\}\\). a example of a person performing \\(\\{\\mathbf{c}\\}\\). a example of a person practicing \\(\\{\\mathbf{c}\\}\\). a demonstration of \\(\\{\\mathbf{c}\\}\\). a demonstration of a person \\(\\{\\mathbf{c}\\}\\). a demonstration of a person using \\(\\{\\mathbf{c}\\}\\). a demonstration of a person doing \\(\\{\\mathbf{c}\\}\\). a demonstration of a person during \\(\\{\\mathbf{c}\\}\\). a demonstration of a person performing \\(\\{\\mathbf{c}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.621, + 0.888, + 0.712 + ], + "angle": 0, + "content": "Evaluation Method. 
+ Several works use different input transformations for different datasets when evaluating zero-shot performance (e.g., [33, 130, 138, 160]). To be as fair as possible, we follow [130] in evaluating with two transformations—center crop and non aspect ratio preserving resize (\"squash\")—and report the max between the two for all models and all datasets we evaluate. Additionally, ObjectNet has a red border around every image to facilitate deduplication, which we remove for evaluation. Finally, we follow [19] in using retrieval reweighting (DSL), applying the softmax score distribution to the similarities used for retrieval:" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.722, + 0.887, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\text {s c o r e s} = \\text {s c o r e s} * \\text {s o f t m a x} (\\text {s c o r e s}, \\dim = 0) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.747, + 0.888, + 0.793 + ], + "angle": 0, + "content": "This slightly improves retrieval for most models, so we do it for all models we evaluate for fairness. Notably, we were able to reproduce the reported numbers for most papers with these techniques, but for cases where we could not, we default to the reported number." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.811, + 0.363, + 0.827 + ], + "angle": 0, + "content": "B.2 PE: Language Alignment" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.835, + 0.888, + 0.852 + ], + "angle": 0, + "content": "We provide details of the MLLM experimental setup in \\(\\S 4\\). We describe data, model, and training separately." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.862, + 0.888, + 0.893 + ], + "angle": 0, + "content": "Data. Our MLLM training contains warmup data and supervised finetuning (SFT) data. Our warmup data is a 1M subset image-text pairs of our \\(\\mathrm{PE}_{\\mathrm{core}}\\) pretraining dataset. 
For SFT data, we use a diverse data" + }, + { + "type": "page_footnote", + "bbox": [ + 0.128, + 0.9, + 0.446, + 0.914 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/LAION-AI/CLIP_benchmark" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.142 + ], + "angle": 0, + "content": "mix consisting of 2.6M unique samples. This dataset is composed of \\(1.7\\mathrm{M}^7\\) visual QAs samples from the Cauldron [65], 0.5M grounded QA pairs from Visual Genome [60], Flickr-Entities [103] and Densely Captioned Images [139], 0.1M image-captioning pairs from COCO [76] and 0.3M text-only samples. This comprehensive data mix allows us to thoroughly assess our model's capabilities in various MLLM tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.154, + 0.888, + 0.262 + ], + "angle": 0, + "content": "Model. As described in § 4.1, we use a simple vision-language model architecture where a vision encoder and a pretrained decoder-only LLM are connected by a vision projector. For all tables, we use either Llama3.1-instruct 8B or QwenLM 2.5-instruct 7B as a language model, and 2-layer MLP as a vision projector. For fair comparison, we use the native resolution for image input. During inference, we evaluate the models on video tasks in zero-shot manner: We concatenate all video frames into a sequence and feed to language model, without seeing video samples during SFT. For all video tasks, we use 8 frames with the same native resolution of height and width. For \\(\\mathrm{PE}_{\\mathrm{core}}\\) and \\(\\mathrm{PE}_{\\mathrm{lang}}\\), this makes \\(448 \\times 448 \\times 8\\) input and \\(32 \\times 32 \\times 8\\) vision tokens." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.272, + 0.889, + 0.455 + ], + "angle": 0, + "content": "Training. MLLM training consists of warmup and supervised finetuning (SFT) stages. 
+ In both stages, we freeze vision encoder and train vision projector and LLM. During warmup stage, we use a global batch size of 128 with a learning rate of \\(1 \\times 10^{-4}\\). We gradually increase the learning rate from \\(1 \\times 10^{-6}\\) to \\(1 \\times 10^{-4}\\) over 120 steps, and follow a cosine learning rate decay schedule to train a total of 8,000 steps. During SFT stage, we use a global batch size 256 with a learning rate of \\(1 \\times 10^{-5}\\). Similar to the warmup, we gradually increase the learning rate from \\(1 \\times 10^{-7}\\) to \\(1 \\times 10^{-5}\\) over 300 steps, and follow a cosine learning rate decay schedule to train a total of 12.5K steps. We truncate text-sequences longer than 2,048 tokens on top of the visual tokens. This makes the maximum sequence length to be (num. vision tokens) + 2,048. With \\(448 \\times 448\\) input resolution and patch size of 14, we set the maximum sequence length to \\(1,024 + 2,048 = 3,072\\). To represent bounding boxes on output side for image grounding tasks, we simply use text tokens to represent each bounding box: each coordinate is normalized between 000 and 999, in “[x, y, x, y]” box format for top-left and bottom-right corners (e.g., [012, 122, 633, 782])." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.46, + 0.888, + 0.507 + ], + "angle": 0, + "content": "For all baselines, we search for the best intermediate layer features to adapt to LLM. We search over \\(\\{-1, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -40\\}\\) layers (counting from last) and report the best result in average over OCR/Chart/Document Q&A, Visual Q&A, Image Captioning and Video Understanding."
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.524, + 0.342, + 0.541 + ], + "angle": 0, + "content": "B.3 PE: Spatial Alignment" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.551, + 0.273, + 0.566 + ], + "angle": 0, + "content": "B.3.1 Training Details" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.578, + 0.877, + 0.594 + ], + "angle": 0, + "content": "Loss Functions. For self-aligning to frozen \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) layer 41 features (\\(L_{\\mathrm{core}}\\)), we minimize cosine similarity:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.604, + 0.887, + 0.641 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {c o r e}} = \\frac {1}{n _ {\\mathrm {t o k}}} \\sum \\left(\\frac {\\left(S _ {5 0}\\right) \\left(T _ {4 1}\\right) ^ {T}}{\\left\\| S _ {5 0} \\right\\| \\cdot \\left\\| T _ {4 1} \\right\\|}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.647, + 0.888, + 0.695 + ], + "angle": 0, + "content": "where \\( S_{50} \\) denotes the last layer features of the student, \\( T_{41} \\) denotes frozen layer 41 features from \\( \\mathrm{PE}_{\\mathrm{core}}\\mathrm{G} \\), and \\( n_{\\mathrm{tok}} \\) represents the number of tokens. Note that we chose 41 fairly arbitrarily (it is layer 40 when written with indexing from 0). Judging by Fig. 8, any layer around 40 should work (and 39 may be slightly better)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.701, + 0.888, + 0.747 + ], + "angle": 0, + "content": "For the encouraging locality loss \\((L_{\\mathrm{loc}})\\), we compute the pairwise cosine similarity between a model's own tokens and itself. This forms a \"spatial correspondence map\" for what tokens should be considered similar. 
We then compute the same for the student, and minimize the difference between the two with MSE loss:" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.757, + 0.887, + 0.794 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {l o c}} = \\frac {1}{n _ {\\text {t o k}} ^ {2}} \\sum \\left(\\frac {(S _ {5 0}) (S _ {5 0}) ^ {T}}{| | S _ {5 0} | | ^ {2}} - \\frac {(T _ {\\mathrm {S A M}}) (T _ {\\mathrm {S A M}}) ^ {T}}{| | T _ {\\mathrm {S A M}} | | ^ {2}}\\right) ^ {2} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.804, + 0.888, + 0.836 + ], + "angle": 0, + "content": "where \\( T_{\\mathrm{SAM}} \\) denotes the \"SAM Mask Logits\" constructed in §5.2. We also find using a temperature \\( (t) \\) on the SAM teacher's pairwise cosine similarity term \\( (x) \\) useful: \\( e^{t(x - 1)} \\). The full loss is \\( L_{\\mathrm{spatial}} = L_{\\mathrm{core}} + L_{\\mathrm{loc}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.846, + 0.889, + 0.892 + ], + "angle": 0, + "content": "Hyperparameters. In Tab. 21 we show the training hyperparameters for spatial alignment, finetuned on top of the initial \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) checkpoint. Then in Tab. 22 and Tab. 23, we show the settings for the two teachers and losses. Note that when running the teachers, we run them on the exact same image as the student (same data" + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.9, + 0.355, + 0.914 + ], + "angle": 0, + "content": "7We excluded multi-images samples." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.113 + ], + "angle": 0, + "content": "aug and all). Additionally, because the SAM 2.1 teacher operates at a resolution of 1024, we upsample the image, generate the mask logits, and then downsample the result. Both teachers are frozen." 
+ }, + { + "type": "table", + "bbox": [ + 0.137, + 0.123, + 0.329, + 0.333 + ], + "angle": 0, + "content": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate5e-4
batch size12,288
warm-up steps0
training steps24K
data quantity5.4B (PEcore PT Data)
samples seen300M
resolution448
mask ratio0.75
mask size2×2 tokens
droppath0.4
layerscale0.1
aspect jitter ar(0.75,1.33)
data augcolor jitter j(0.32,0,0.32,0)
hflip p(0.5)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.142, + 0.337, + 0.319, + 0.35 + ], + "angle": 0, + "content": "Table 21 Spatial Alignment." + }, + { + "type": "table", + "bbox": [ + 0.394, + 0.123, + 0.606, + 0.256 + ], + "angle": 0, + "content": "
configvalues
modelSAM 2.1-L
layermask logits
resolution1024 (interp→448)
lossEq. 3
loss weight1
temperature20
sample points32 × 32 (1024)
pred iou threshold0
stability score threshold0
mask threshold0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.415, + 0.26, + 0.584, + 0.272 + ], + "angle": 0, + "content": "Table 22 SAM 2.1 Teacher." + }, + { + "type": "table", + "bbox": [ + 0.686, + 0.123, + 0.851, + 0.196 + ], + "angle": 0, + "content": "
configvalues
modelPEcoreG
layer41
resolution448
lossEq. 2
loss weight1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.685, + 0.2, + 0.851, + 0.213 + ], + "angle": 0, + "content": "Table 23 PEcoreG Teacher." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.376, + 0.313, + 0.388 + ], + "angle": 0, + "content": "B.3.2 Visualization Method" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.398, + 0.889, + 0.445 + ], + "angle": 0, + "content": "To visualize the features in Fig. 17 and Fig. 20, our goal is to map a 1536-dimensional space down to 3 dimensions to view how the model encodes each token in relation to each other. One naive approach would be to apply PCA with 3 dimensions across all token in the image. However, we find this alone can be misleading." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.451, + 0.889, + 0.558 + ], + "angle": 0, + "content": "Specifically, if the model has rich semantics, it should be the case that most of those 1536 features have some useful information in them. Some of that information could be spatially contiguous, some of it not. We want PCA to only select the spatially contiguous information, since we are trying to evaluate the spatial quality of the features. However, naively applying PCA will not necessarily do that, especially for models with information aggregated in \"global tokens\" (§5.1). Despite these tokens carrying important information, they are not spatially contiguous. Thus, if PCA dedicates a large portion of its 3 dimensions to global tokens, the features will look like their spatial quality is bad, despite the features containing good spatial information." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.564, + 0.888, + 0.656 + ], + "angle": 0, + "content": "So, how do we select for only the spatially contiguous information to visualize? The answer is simple: by definition, the spatially contiguous information will be... spatially contiguous. 
+ To keep the spatially contiguous information while lowering the impact of the global tokens, we can simply apply a low pass filter to the features (specifically, a gaussian blur with kernel size 3 and a \\(\\sigma\\) of 1). To retain the detail of the original features, we can average the two together. Thus, to visualize features, we use the 3D PCA of the following. \\(x\\) denotes the model's output features, and \\(g(x)\\) denotes gaussian blur." + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.664, + 0.887, + 0.68 + ], + "angle": 0, + "content": "\\[\n0. 5 x + 0. 5 g (x, k = 3, \\sigma = 1) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.688, + 0.888, + 0.735 + ], + "angle": 0, + "content": "We show the impact of this in Fig. 19. Blurring the features makes them appear more detailed! In reality, that information was always there, just PCA did not show it. Thus, great care must be taken when visualizing high dimensional feature spaces. If they were easy to map to 3 dimensions—you wouldn't need 1536 of them!" + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.753, + 0.691, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.855, + 0.888, + 0.911 + ], + "angle": 0, + "content": "Figure 19 Feature Visualization Ablation. With raw features (top row), PCA misses spatially contiguous parts of the feature space and instead focuses on global tokens (which carry information but are not spatially coherent). By applying a simple low pass filter (bottom row), we can reveal spatial information that PCA originally missed (see column 2: with raw features, the background looks like a mess, with the low pass filter the tiles become visible)."
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.142 + ], + "angle": 0, + "content": "Then, to map the PCA dimensions to RBG pixel values, we map each PCA component to a corresponding channel in LCh color space, then convert those LCh colors to RGB to get the final image. Note that we use LCh instead of RGB directly for aesthetic reasons, and also because LCh is a cylindrical color space—where smooth changes to the values look like smooth changes in colors to humans—and thus is easier to discern." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.159, + 0.393, + 0.173 + ], + "angle": 0, + "content": "B.3.3 Frozen Feature Dense Prediction" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.182, + 0.886, + 0.213 + ], + "angle": 0, + "content": "We discuss the detailed settings of the results for dense prediction with frozen features in Tab. 13. Each model is evaluated with its native resolution up to 448 or 448 (whichever is optimal)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.224, + 0.888, + 0.3 + ], + "angle": 0, + "content": "Zero-Shot Tracking. We evaluate our pretrained models on label propagation task using the protocols in [52, 107] on DAVIS dataset [104]. This evaluation does not require any finetuning or probing, therefore preserves the spatial features in the model. Following Toto [107], we use the features from the last \\( n = 7 \\) frames to find the nearest neighbor patch in the current frame, and then propagate the masks from the previous frames to the current frame. Note that this evaluation method does not require any training." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.311, + 0.888, + 0.387 + ], + "angle": 0, + "content": "Semantic Segmentation. For semantic segmentation, we evaluate our pretrained models on ADE20K [167] semantic segmentation task. 
We use a linear layer and convolutional layer to map intermediate spatial features to segmentation masks following [98]. The models are evaluated and then features are resized to \\(518 \\times 518\\). We only use features from single layer. The probing layers are finetuned with AdamW [83] with a learning rate of 0.001." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.399, + 0.888, + 0.476 + ], + "angle": 0, + "content": "Depth Estimation. For depth estimation on NYUv2 [123], we follow [75, 98]. We use a DPT-head [109] on top of our frozen pretrained model and use only single layer features. We scale the size of the DPT-head for each models based on the hidden size for each architecture. Because NYU is a small dataset and the models we evaluate are large, we observe the results for most models are noisy and prone to overfitting. Thus, for fair comparison we train all models for 20 epochs and for all models take the lowest validation loss over all epochs." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.486, + 0.888, + 0.579 + ], + "angle": 0, + "content": "Frozen Detection. For the frozen feature detection results presented in §3, we evaluated using Mask R-CNN [43] as a probe. We used a resolution of 1024 for Fig. 8 and 768 for the remaining experiments in §3. Because the backbones were frozen, we did not add any global attention and instead simply tiled the input image with a window size of 32 for the 1024px experiments and 24 for the 768px experiments. All models were interpolated to patch 16. Finally, the backbones were frozen and only the FPN and R-CNN heads trained for 15 epochs on COCO with a stepwise decay LR without drop path." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.595, + 0.525, + 0.609 + ], + "angle": 0, + "content": "B.3.4 End-to-End Finetuning Detection and Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.617, + 0.888, + 0.679 + ], + "angle": 0, + "content": "We provide a detailed discussion of settings of end-to-end finetuning on detection and segmentation presented in Tab. 14. The hyperparameters can be found in Tab. 24. We find that the default 100-epoch protocol in ViTDet [72, 149] causes overfitting problems in COCO experiments especially for billion-level parameter vision encoders, so we tune the training epochs, learning rate, drop path and learning rate decay accordingly." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.685, + 0.888, + 0.716 + ], + "angle": 0, + "content": "The LVIS experiment setting is the same as COCO except all L-size models use learning rate of 2e-4 and all g-size and G-size models use 75 epochs." + }, + { + "type": "table", + "bbox": [ + 0.152, + 0.727, + 0.845, + 0.888 + ], + "angle": 0, + "content": "
configvaluesmodellrepochsdrop pathlr decaylayersglobal window indexwindow size
optimizerAdamWOpenAI CLIP-L1e-41000.40.824(5, 11, 17, 23)14
optimizer momentum(0.9, 0.999)MetaCLIP-L1e-41000.40.824(5, 11, 17, 23)14
weight decay0.1MetaCLIP-G5e-5750.50.948(11, 23, 35, 47)14
learning rateSigLIP-so1e-41000.40.827(2, 10, 18, 26)14
learning rate scheduleStep-wise decayEVA02-L1e-41000.40.824(5, 11, 17, 23)14
learning rate decayMAE-L1e-41000.40.824(5, 11, 17, 23)14
batch size64SigLIP2-so1e-41000.40.827(2, 10, 18, 26)14
image size1024 × 1024SigLIP2-g5e-5750.50.940(9, 19, 29, 39)14
augmentationLSJ [0.1, 2.0]DINOv2-L1e-41000.40.824(5, 11, 17, 23)32
epochsDINOv2-g5e-5360.50.940(9, 19, 29, 39)32
drop pathPEcoreG5e-5750.50.950(12, 24, 36, 49)32
positional embeddingabswin [7]PEspatialG5e-5360.50.950(12, 24, 36, 49)32
patch size16
window size
global window index
" + }, + { + "type": "table_caption", + "bbox": [ + 0.265, + 0.893, + 0.733, + 0.906 + ], + "angle": 0, + "content": "Table 24 Settings for End-to-End Finetuning Detection and Segmentation." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.082, + 0.44, + 0.097 + ], + "angle": 0, + "content": "B.3.5 System-Level Comparison on Detection" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.105, + 0.589, + 0.196 + ], + "angle": 0, + "content": "We describe our implementation for system-level comparison to the state-of-the-arts on COCO object detection in Tab 15. Our implementation is based on the DETA repository8. We replace the vision encoder with our \\(\\mathrm{PE}_{\\mathrm{spatial}}\\) and maintain the same hyperparameters as in the end-to-end finetuning settings, while keeping the detector unchanged. The training process consists of three stages:" + }, + { + "type": "table", + "bbox": [ + 0.652, + 0.084, + 0.847, + 0.15 + ], + "angle": 0, + "content": "
Test-Time AugAPbox
No TTA65.2
+ More Queries65.3
+ SoftNMS [6]65.8
+ Flip Aug65.8
+ Multiscale Aug66.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.611, + 0.154, + 0.886, + 0.181 + ], + "angle": 0, + "content": "Table 25 Test-Time Aug for system-level comparison on COCO in Tab. 15." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.203, + 0.884, + 0.233 + ], + "angle": 0, + "content": "1. Initial Training: Train on Objects365 for 12 epochs with an image resolution of \\(1024 \\times 1024\\), a total batch size of 256, and a learning rate of 2e-4, which is divided by 10 at the 10th epoch." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.24, + 0.884, + 0.27 + ], + "angle": 0, + "content": "2. Increasing Resolution: Continue training on Objects365 for 6 epochs with a resolution of \\(1536 \\times 1536\\), a total batch size of 128, and a learning rate of 5e-5, which is divided by 10 at the 5th epoch." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.278, + 0.884, + 0.308 + ], + "angle": 0, + "content": "3. Finetuning: Finetune on COCO dataset for 12 epochs with an image resolution of \\(1728 \\times 1728\\), a total batch size of 64, and a learning rate of 5e-5, which is divided by 10 at the 8th epoch." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.316, + 0.885, + 0.361 + ], + "angle": 0, + "content": "4. Further Increasing Resolution: Further finetune on COCO dataset for 3 epochs with a resolution of \\(1824 \\times 1824\\), a total batch size of 64. To save GPU memory, we use SGD optimizer instead of Adam, with a learning rate of 5e-3, which is divided by 10 at the 2th epoch." + }, + { + "type": "list", + "bbox": [ + 0.13, + 0.203, + 0.885, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.369, + 0.884, + 0.398 + ], + "angle": 0, + "content": "We apply a series of test-time augmentation techniques to further improve the performance, detailed in Tab. 25." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.423, + 0.332, + 0.439 + ], + "angle": 0, + "content": "C Additional Results" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.457, + 0.427, + 0.474 + ], + "angle": 0, + "content": "C.1 PEcore: Robust Image Pretraining" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.481, + 0.886, + 0.557 + ], + "angle": 0, + "content": "In Tab. 26, we present the raw data for the robustness metrics in Fig. 2. Across the board, each change improved almost all metrics (with the exception of progressive resolution slightly hurting the average and mask regularization slightly hurting ImageNet Adversarial). The fact that there were no tradeoffs to these changes, indicate that their improvements to the features are general. This could be why most of these changes improved performance for downstream tasks as well." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.565, + 0.884, + 0.626 + ], + "angle": 0, + "content": "Note that in §2.1, we only discuss changes that we know to work. There are several changes that we have tried that do not work (i.e., do not improve performance or lower performance). For instance: average pooling instead of using a class token, increasing the text tower size, using hue or contrast jitter, and maintaining the same resolution throughout training but dropping tokens instead of progressive resolution (FLIP-style)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.632, + 0.884, + 0.679 + ], + "angle": 0, + "content": "We also find increasing batch size and increasing training iterations for an L scale model to have equivalent effects. This is in contrast to the batch size scaling observed by [160], but it is possible that this difference is down to a hyperparameter issue." + }, + { + "type": "table", + "bbox": [ + 0.312, + 0.69, + 0.694, + 0.846 + ], + "angle": 0, + "content": "
StepZero-Shot Classification
Avg Class.ImageNet w1/2[26]ImageNet v2[12]ObjectNet IN Classes [4]ImageNet Adversarial [47]ImageNet Reminims [46]ImageNet Sketch [143]
1Baseline75.378.971.973.768.391.167.8
2Progressive Resolution75.178.971.872.469.990.567.0
3High Batch Size76.279.572.874.171.891.068.1
4LAMB and High LR76.979.973.374.373.591.568.6
5High Resolution (336)78.380.473.875.679.292.068.8
62D RoPE79.280.774.177.480.992.769.4
7Attention Pooling80.181.074.878.482.993.469.9
8Data Augmentation80.881.175.280.883.193.571.2
9Mask Regularization80.981.375.380.982.893.871.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.849, + 0.884, + 0.877 + ], + "angle": 0, + "content": "Table 26 Robust Image Pretraining Full Results. Raw results for the robustness metrics metrics in Fig. 2. Almost every change improves every metric, but some metrics are improved more than others (e.g., ObjectNet and ImageNet-A)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.895, + 0.371, + 0.908 + ], + "angle": 0, + "content": "8https://github.com/jozhang97/DETA" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.08, + 0.375, + 0.098 + ], + "angle": 0, + "content": "C.2 \\(\\mathsf{PE}_{\\mathrm{core}}\\) : Video Data Scaling" + }, + { + "type": "table", + "bbox": [ + 0.263, + 0.111, + 0.734, + 0.24 + ], + "angle": 0, + "content": "
Video Data SizeAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet w1 [20]ImageNet v2 [112]ObjectNet In Classes [4]ImageNet adversarial [47]MS-COCO ts→img [76]MS-COCO img→ts [76]MS-COCO ts→img [76]Average VideoKeritics 400 [55]Keritics 600 [55]Keritics 700 [55]UCF 101 [126]HMDB 51 [62]MSR-VTT ts→vid [153]MSR-VTT vid→ts [153]
0M77.083.978.686.690.352.170.357.070.369.461.678.547.440.531.4
3M77.784.178.886.690.953.374.261.672.472.264.288.553.842.837.6
6M78.084.279.086.791.154.072.763.673.573.466.088.954.644.943.6
8M78.484.279.287.091.654.973.664.874.574.567.789.555.346.945.5
11M78.684.279.287.291.855.473.865.275.175.067.689.755.647.745.8
14M78.884.279.287.591.955.774.365.575.475.367.989.955.847.846.3
17M78.984.279.287.792.055.874.365.875.775.568.290.256.048.346.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.244, + 0.885, + 0.272 + ], + "angle": 0, + "content": "Table 27 Scaling Video Data. Increasing the number of synthetic video data generated by our proposed video data engine consistently enhances the performance of image and video classification and retrieval tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.286, + 0.886, + 0.392 + ], + "angle": 0, + "content": "The detailed video data scaling results are presented in Tab. 27. Our experiments demonstrate that increasing the number of synthetic video data generated by the proposed video data engine enhances the performance of classification and retrieval on both image and video benchmarks. On image benchmarks, while improvements on ImageNet val and v2 plateaued earlier compared to ObjectNet and ImageNet Adversarial, MS-COCO retrieval performance continued to show gains. On video benchmarks, scaling synthetic video data consistently yields better performance for both classification and retrieval tasks. We expect that further scaling up the video data with our video data engine will continue to drive performance improvements." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.41, + 0.348, + 0.427 + ], + "angle": 0, + "content": "C.3 \\(\\mathsf{PE}_{\\mathrm{core}}\\) : Smaller Models" + }, + { + "type": "table", + "bbox": [ + 0.283, + 0.442, + 0.721, + 0.557 + ], + "angle": 0, + "content": "
ModelTeacher's TempModel ScaleZero-Shot Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet JV Classes [4]ImageNet Adversarial [47]ImageNet Renditions [46]ImageNet Sketch [143]
vanilla pretrained model-B66.274.267.462.550.283.059.8
distillation×2B65.271.865.561.450.283.658.6
×1B68.074.968.164.754.185.361.1
×0.7B68.275.168.265.354.485.161.3
×0.5B68.375.268.265.354.285.261.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.561, + 0.886, + 0.603 + ], + "angle": 0, + "content": "Table 28 Ablation Study on Teacher's Distribution Temperature. We evaluate the effect of varying temperatures on the teacher's distribution, using a pretrained vanilla CLIP model (ViT-B/14, resolution 224) as a baseline (details in §2.1). The models are finetuned via distillation with a short schedule of 50K steps." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.62, + 0.886, + 0.698 + ], + "angle": 0, + "content": "Ablation: Distillation Temperature. To optimize the performance of smaller models (B and L-scales in Tab. 4), we utilize a distillation finetuning approach with \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) as the teacher model. During this process, both student and teacher models encode image and text inputs to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence loss on both image-to-text and text-to-image similarity distributions." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.703, + 0.886, + 0.765 + ], + "angle": 0, + "content": "We find that using a fixed and smaller temperature (i.e., higher logit scale), which controls the range of logits in the softmax, significantly enhances the effectiveness of distillation. This results in a sharper distribution for the teacher's distributions. In contrast, the student's temperature remains learnable, consistent with our pretraining procedure and CLIP training." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.772, + 0.888, + 0.848 + ], + "angle": 0, + "content": "In Tab. 28, we present an ablation study examining the impact of temperature on the teacher's distribution. For this analysis, we utilize a pretrained vanilla CLIP model (ViT-B/14, resolution 224), which serves as a baseline for comparison (see §2.1 for details). 
The models are finetuned using distillation with a concise schedule of 50K steps. Notably, our results show that employing a smaller temperature for the teacher's distributions yields improved performance on zero-shot ImageNet benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.859, + 0.888, + 0.906 + ], + "angle": 0, + "content": "Building strong smaller models. In Tab. 29, we demonstrate our step-by-step training strategy for building strong smaller models at the L scale, as discussed in §2.4. Specifically, we outline our approach to image pretraining, image distillation, and video finetuning, and distillation. Leveraging the robust foundation established by our" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.95 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.149, + 0.079, + 0.847, + 0.182 + ], + "angle": 0, + "content": "
ModelStageImage Zero-ShotVideo Zero-Shot
Average ImageImageNetv1 [26]ImageNetv2 [112]ObjectNetIN Classes [4]ImageNetAdversarial [47]MS-COCOv1→v1img [76]MS-COCOimg→v1img [76]Average VideoKinetics400 [55]Kinetics600 [53]Kinetics700 [55]UCF101 [126]HMDB 51 [62]MS-RVTTv1→v1v1d [153]MS-RVTTv1→v1v1d [153]
SigLIP2-L/16 [138]-76.083.177.484.484.355.371.456.265.362.556.886.749.341.531.4
PEcoreLimage pretraining75.182.976.881.885.653.070.459.068.067.758.585.557.742.033.4
PEcoreL+image distillation from PEcoreG77.683.678.184.488.956.074.764.573.072.664.886.558.047.948.4
PEcoreL+video finetuning78.083.577.984.789.057.175.965.373.472.765.387.158.550.350.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.185, + 0.887, + 0.227 + ], + "angle": 0, + "content": "Table 29 Building Strong Smaller Models. This table illustrates the step-by-step process of developing the \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}336\\mathrm{px}\\) model, as outlined in §2.4. Starting with the pretrained \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}\\), both image distillation, along with video finetuning, enhance performance across image and video benchmarks, resulting in a unified L-scale model." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.254, + 0.886, + 0.3 + ], + "angle": 0, + "content": "pretraining techniques (§2.1), we show that distilling from \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\), our strongest unified perception encoder, yields improvements on both image and video benchmarks. Furthermore, a short-scheduled video finetuning provides an additional boost in performance on both benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.318, + 0.371, + 0.336 + ], + "angle": 0, + "content": "C.4 \\(\\mathsf{PE}_{\\mathrm{lang}}\\): Additional Results" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.342, + 0.888, + 0.45 + ], + "angle": 0, + "content": "Analogous to Tab. 10, in Tab. 30, we compare \\(\\mathrm{PE}_{\\mathrm{core}}\\) and \\(\\mathrm{PE}_{\\mathrm{lang}}\\) with dynamic resolution setting [77, 82]. More specifically, we use up to 4 tiles, following after a thumbnail, which is a whole image resized into \\(448 \\times 448\\). With the maximum number of tiles of 4, the model can cover \\(\\{1 \\times 1, 1 \\times 2, 1 \\times 3, 1 \\times 4, 2 \\times 1, 2 \\times 2, 3 \\times 1, 4 \\times 1\\}\\) tile ratios. Similar to the Tab. 10, 11, 12 in the main paper, we show that \\(\\mathrm{PE}_{\\mathrm{lang}}\\) largely outperforms the baseline vision encoders by large margins across all categories of MLLM tasks. 
Note that \\(\\mathrm{PE}_{\\mathrm{lang}}\\) has been alignment-tuned with native resolution input, as opposed to e.g., InternViT 2.5, which has been midtrained with dynamic tiling, which shows \\(\\mathrm{PE}_{\\mathrm{lang}}\\) 's strong generality for different input formats." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.455, + 0.888, + 0.562 + ], + "angle": 0, + "content": "Next, in Tab. 31, 32, 33, we show the breakdowns of RefCOCO/+/g [56] with Llama 3.1-instruct 8B as language model, Qwen2.5 LM 7B as language model, and with Llama 3.1-instruct 8B and dynamic tiling \\((4 + 1)\\), respectively. In our SFT data, we have VisualGenome [60], DCI [139], and Flickr30K [103] as grounding datasets, and RefCOCO/+/g are unseen. We therefore report zero-shot performance of the MLLMs to evaluate spatial understanding capability of the vision encoders. Overall, \\(\\mathrm{PE}_{\\mathrm{lang}}\\) L or G show the best performance across all RefCOCO splits, except with Qwen2.5 LM. This is because (1) InternViT 2.5 6B is midtrained with Qwen2 LM, and (2) during pre/mid-training the training data of RefCOCO/+/g are seen." + }, + { + "type": "table", + "bbox": [ + 0.112, + 0.574, + 0.891, + 0.814 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolution Patch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. Ground. RetCOCOg+ [58]Avg. VideoVdeoMME Acc [38]STAR Acc [148]TCIF-QA Acc [53]EgoSchema Acc [89]MVBench Acc [68]PerceptionTest Acc [105]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1461.871.162.540.273.374.665.364.988.579.8113.490.4133.5116.267.148.044.847.162.739.046.048.3
MetaCLIP-G [152]1.8B224/1460.368.161.339.172.874.965.465.988.280.1114.291.8134.4116.566.049.046.546.562.545.044.748.9
PElang G†1.7B*224/1470.279.879.147.574.676.070.664.388.380.6116.392.0136.4120.569.556.649.055.969.961.250.053.6
576 Tokens per Tile
CLIP [106]0.3B336/1469.676.878.250.372.976.371.864.988.080.4114.090.9134.4116.668.550.846.652.265.044.646.349.9
AIMv2-L [37]0.3B336/1466.774.174.945.272.477.473.565.689.081.7116.492.5137.1119.566.654.143.454.370.656.047.352.7
SigLIP2-so [138]0.4B384/1655.561.454.933.372.376.570.166.088.681.2118.095.8138.3119.866.554.344.952.866.858.649.653.3
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1477.582.188.561.877.479.780.266.489.882.5120.397.4140.2123.271.959.849.462.774.164.053.155.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1656.966.056.534.370.976.469.966.288.481.2117.894.7137.8120.967.846.247.044.966.739.234.545.1
PEcoreL0.3B448/1467.172.478.346.471.276.474.063.788.879.0113.991.5134.5115.762.951.447.051.262.749.647.850.1
PElang L0.3B448/1478.382.889.365.275.978.578.864.489.681.3117.894.7138.1120.771.656.547.057.268.059.852.354.7
AIMv2 3B [37]2.7B448/1467.573.078.246.572.278.879.266.288.381.7119.095.8139.7121.565.154.049.655.467.349.649.952.5
InternViT2.5 6B [18]5.5B448/1467.474.674.347.672.975.971.364.887.779.7110.485.3132.5113.556.852.046.049.665.050.649.651.3
PEcoreG1.9B448/1468.073.481.247.669.776.474.362.589.179.6113.091.6134.5112.967.653.246.054.367.051.248.752.0
PElang G1.7B*448/1478.681.889.867.875.080.382.366.789.682.8119.695.2140.3123.471.859.049.661.873.960.052.656.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.817, + 0.888, + 0.887 + ], + "angle": 0, + "content": "Table 30 4+1 Tile Llama 8B MLLM Results. Llama 3.1-instruct 8B [82] is used as a language model. \\({}^{*}\\mathrm{PE}_{\\mathrm{lang}}\\) has 1.7B parameters since we discard the last 3 layers during language alignment. All MLLMs are trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of \\(448\\times 448\\) (or the corresponding resolution for each encoder). The image tiles follow after a thumbnail input, similar to prior work [77]. Evaluation on an model that was interpolated without additional training (i.e., zero-shot resolution)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.296, + 0.08, + 0.709, + 0.35 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolution Path SizeAvg. Ground.
RefCOCO val/ [56]RefCOCO testA [56]RefCOCO val/ [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1460.663.656.767.554.158.948.867.267.8
MetaCLIP-G [152]1.8B224/1460.562.056.567.853.558.749.268.268.3
PEiang G†1.7B*224/1465.767.764.470.958.362.056.673.274.4
576 Tokens per Image
CLIP [106]0.3B336/1465.066.761.471.657.662.554.573.272.8
AIMv2-L [37]0.3B336/1463.365.461.669.655.060.052.071.171.5
AIMv2-L Dist. [37]0.3B336/1462.664.861.069.454.459.051.370.870.0
SigLIP2-so [138]0.4B384/1667.468.866.571.060.361.858.576.276.0
SigLIP2-g-opt [138]1.1B384/1666.567.966.170.158.861.757.175.575.0
PEiang G†1.7B*336/1468.969.867.573.261.564.060.877.377.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1466.969.366.772.658.363.157.274.274.0
SigLIP2-so [138]0.4B512/1669.671.469.274.461.364.860.377.977.2
PEcore L0.3B448/1459.761.755.366.953.158.848.068.567.5
PEiang L0.3B448/1470.571.870.273.063.766.162.778.878.9
DINOv2 [98]1.1B448/1464.967.262.570.557.061.054.573.173.1
AIMv2 3B [37]2.7B448/1436.137.634.140.732.736.232.036.938.6
InternViT2.5 6B [18]5.5B448/1468.070.267.672.260.664.058.775.375.2
PEcore G1.9B448/1466.668.364.472.358.762.756.075.175.0
PEiang G1.7B*448/1471.371.969.975.164.267.363.079.479.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.352, + 0.884, + 0.368 + ], + "angle": 0, + "content": "Table 31 Llama MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used for zeroshot RefCOCO/+/g grounding." + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.379, + 0.709, + 0.579 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolutionPatch SizeAvg. Ground.
RefCOCO var[56]RefCOCO texA[56]RefCOCO var[56]RefCOCO+ texA[56]RefCOCO+ var[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1670.073.673.074.360.962.759.978.477.2
SigLIP2-g-opt [138]1.1B384/1669.973.372.473.660.562.360.778.478.2
PEiangG†1.7B*336/1470.173.472.075.362.064.261.278.477.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1468.172.469.174.159.362.456.675.275.5
SigLIP2-so [138]0.4B512/1670.574.173.774.461.762.961.078.677.9
PEcoreL0.3B448/1466.570.467.871.557.761.156.275.875.3
PEiangL0.3B448/1470.474.472.674.662.264.062.079.078.7
DINOv2 [98]1.1B448/1469.373.471.173.960.063.959.076.476.7
AIMv2 3B [37]2.7B448/1467.671.467.772.359.261.256.376.476.4
InternViT2.5 6B‡ [18]5.5B448/1472.877.776.577.163.666.062.280.079.5
PEcoreG1.9B448/1470.574.071.875.861.564.860.178.577.3
PEiangG1.7B*448/1472.175.472.976.364.265.962.979.779.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.582, + 0.884, + 0.61 + ], + "angle": 0, + "content": "Table 32 Qwen MLLM-Based Zereshot RefCOCO. QwenLM 2.5 7B [155] is used as a language model. All MLLMs report zereshot results on RefCOCO/+/g datasets. \\(\\ddagger\\)Trained with RefCOCO/+/g beforehand." + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.622, + 0.709, + 0.863 + ], + "angle": 0, + "content": "
ModelEncoder ParamsResolutionAvg. Ground.Grounding
RefCOCORefCOCORefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+
val [56]val [56]val [56]val [56]val [56]val [56]val [56]val [56]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1467.169.365.073.260.564.956.574.373.4
MetaCLIP-G [152]1.8B224/1466.067.963.271.959.262.955.873.873.1
PElang G†1.7B*224/1470.371.669.673.763.366.262.678.678.2
576 Tokens per Tile
CLIP [106]0.3B336/1468.570.766.674.161.165.958.176.075.1
AIMv2-L [37]0.3B336/1466.668.465.571.459.363.456.574.274.2
SigLIP2-so [138]0.4B384/1666.567.966.170.158.861.757.175.575.0
SigLIP2-g-opt [138]1.1B384/1666.568.265.670.159.062.358.074.874.0
PElang G†1.7B*336/1471.973.671.574.964.867.363.980.480.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1667.869.267.871.259.962.559.076.976.0
PEcoreL0.3B448/1462.965.359.969.256.662.252.070.170.0
PElang L0.3B448/1471.673.070.874.365.267.262.979.779.7
AIMv2 3B [37]2.7B448/1465.166.962.971.158.162.455.671.872.2
InternViT2.5 B‡ [18]5.5B448/1456.861.056.465.851.057.046.158.058.9
PEcoreG1.9B448/1467.669.265.872.459.964.158.375.175.6
PElang G1.7B*448/1471.872.670.774.664.866.664.680.480.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.866, + 0.885, + 0.909 + ], + "angle": 0, + "content": "Table 33 4+1 Tile Llama 8B MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used as a language model. All trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of the encoder's native resolution, with a thumbnail image in front, similar to prior work [77]. \\( {}^{ \\ddagger } \\) Trained with RefCOCO/+/g beforehand." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.08, + 0.478, + 0.098 + ], + "angle": 0, + "content": "C.5 PEspatial: Additional Qualitative Results" + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.111, + 0.496, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.112, + 0.885, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.76, + 0.888, + 0.803 + ], + "angle": 0, + "content": "Figure 20 More Visualizations of the feature space following Fig. 17. After the image itself, column 1 is \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) last layer features, column 2 is \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) aligned to its own layer 41, column 3 is \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) aligned to SAM 2.1-L [111] mask logits, and column 4 is \\(\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}\\) aligned to both, denoted \\(\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}\\). See §B.3.2 for visualization method." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.082, + 0.228, + 0.098 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.114, + 0.887, + 0.142 + ], + "angle": 0, + "content": "[1] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In ICCV, 2019. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.149, + 0.888, + 0.26 + ], + "angle": 0, + "content": "[2] Pravesh Agrawal, Szymon Antoniak, Emma Bou Hanna, Baptiste Bout, Devendra Chaplot, Jessica Chudnovsky, Diogo Costa, Baudouin De Monicault, Saurabh Garg, Theophile Gervet, Soham Ghosh, Amélie Héliou, Paul Jacob, Albert Q. Jiang, Kartik Khandelwal, Timothee Lacroix, Guillaume Lample, Diego Las Casas, Thibaut Lavril, Teven Le Scao, Andy Lo, William Marshall, Louis Martin, Arthur Mensch, Pavankumar Muddireddy, Valera Nemychnikova, Marie Pellat, Patrick Von Platen, Nikhil Raghuraman, Baptiste Rozière, Alexandre Sablayrolles, Lucile Saulnier, Romain Sauvestre, Wendy Shang, Roman Soletskyi, Lawrence Stewart, Pierre Stock, Joachim Studnia, Sandeep Subramanian, Sagar Vaze, Thomas Wang, and Sophia Yang. Pixtral 12b. arXiv:2410.07073, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.267, + 0.888, + 0.31 + ], + "angle": 0, + "content": "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv:2308.12966, 2023. 
20" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.316, + 0.888, + 0.358 + ], + "angle": 0, + "content": "[4] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. In NeurIPS, 2019. 3, 4, 6, 8, 9, 10, 30, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.365, + 0.888, + 0.463 + ], + "angle": 0, + "content": "[5] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, Thomas Unterthiner, Daniel Keysers, Skanda Koppula, Fangyu Liu, Adam Grycner, Alexey A. Gritsenko, Neil Houlsby, Manoj Kumar, Keran Rong, Julian Eisenschlos, Rishabh Kabra, Matthias Bauer, Matko Bosnjak, Xi Chen, Matthias Minderer, Paul Voigtlaender, Ioana Bica, Ivana Balazevic, Joan Puigcerver, Pinelopi Papalampidi, Olivier J. Henaff, Xi Xiong, Radu Soricut, Jeremiah Harmsen, and Xiaohua Zhai. PaliGemma: A versatile 3b VLM for transfer. arXiv:2407.07726, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.47, + 0.885, + 0.498 + ], + "angle": 0, + "content": "[6] Navaneeth Bodla, Bharat Singh, Rama Chellappa, and Larry S Davis. Soft-NMS-Improving object detection with one line of code. In ICCV, 2017. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.505, + 0.885, + 0.534 + ], + "angle": 0, + "content": "[7] Daniel Bolya, Chaitanya Ryali, Judy Hoffman, and Christoph Feichtenhofer. Window attention is bugged: how not to interpolate position embeddings. In *ICLR*, 2023. 11, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.54, + 0.887, + 0.582 + ], + "angle": 0, + "content": "[8] Florian Bordes, Randall Balestriero, Quentin Garrido, Adrien Bardes, and Pascal Vincent. 
Guillotine regularization: Why removing layers is needed to improve generalization in self-supervised learning. arXiv:2206.13378, 2022. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.589, + 0.885, + 0.617 + ], + "angle": 0, + "content": "[9] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - Mining discriminative components with random forests. In ECCV, 2014. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.624, + 0.887, + 0.652 + ], + "angle": 0, + "content": "[10] Gary Bradski. The OpenCV library. Dr. Dobb's Journal: Software Tools for the Professional Programmer, 2000. 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.659, + 0.887, + 0.687 + ], + "angle": 0, + "content": "[11] Zhaowei Cai and Nuno Vasconcelos. Cascade R-CNN: Delving into high quality object detection. In CVPR, 2018. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.695, + 0.887, + 0.723 + ], + "angle": 0, + "content": "[12] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.73, + 0.887, + 0.772 + ], + "angle": 0, + "content": "[13] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D. Manning. AuroraCap: Efficient, performant video detailed captioning and a new benchmark. In ICLR, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.779, + 0.887, + 0.821 + ], + "angle": 0, + "content": "[14] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. Hybrid task cascade for instance segmentation. In CVPR, 2019. 
19" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.828, + 0.885, + 0.857 + ], + "angle": 0, + "content": "[15] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pretraining from pixels. In ICML, 2020. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.121, + 0.863, + 0.885, + 0.892 + ], + "angle": 0, + "content": "[16] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020. 20" + }, + { + "type": "list", + "bbox": [ + 0.121, + 0.114, + 0.888, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.152 + ], + "angle": 0, + "content": "[17] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, Alexander Kolesnikov, Joan Puigcerver, Nan Ding, Keran Rong, Hassan Akbari, Gaurav Mishra, Linting Xue, Ashish Thapliyal, James Bradbury, Weicheng Kuo, Mojtaba Seyedhosseini, Chao Jia, Burcu Karagol Ayan, Carlos Riquelme, Andreas Steiner, Anelia Angelova, Xiaohua Zhai, Neil Houlsby, and Radu Soricut. Pali: A jointly-scaled multilingual language-image model. In ICLR, 2023. 
8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.158, + 0.887, + 0.257 + ], + "angle": 0, + "content": "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, Lixin Gu, Xuehui Wang, Qingyun Li, Yimin Ren, Zixuan Chen, Jiapeng Luo, Jiahao Wang, Tan Jiang, Bo Wang, Conghui He, Botian Shi, Xingcheng Zhang, Han Lv, Yi Wang, Wenqi Shao, Pei Chu, Zhongying Tu, Tong He, Zhiyong Wu, Huipeng Deng, Jiaye Ge, Kai Chen, Kaipeng Zhang, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv:2412.05271, 2024. 11, 15, 16, 20, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.263, + 0.887, + 0.305 + ], + "angle": 0, + "content": "[19] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyuan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024. 1, 6, 7, 9, 10, 20, 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.311, + 0.887, + 0.341 + ], + "angle": 0, + "content": "[20] Gong Cheng, Junwei Han, and Xiaoqiang Lu. Remote sensing image scene classification: Benchmark and state of the art. Proceedings of the IEEE, 2017. 
9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.347, + 0.887, + 0.431 + ], + "angle": 0, + "content": "[21] Jang Hyun Cho, Andrea Madotto, Effrosyni Mavroudi, Triantafyllos Afouras, Tushar Nagarajan, Muhammad Maaz, Yale Song, Tengyu Ma, Shuming Hu, Hanoona Rasheed, Peize Sun, Po-Yao Huang, Daniel Bolya, Suyog Jain, Miguel Martin, Huiyu Wang, Nikhila Ravi, Shashank Jain, Temmy Stark, Shane Moon, Babak Damavandi, Vivian Lee, Andrew Westbury, Salman Khan, Philipp Krahenbuhl, Piotr Dólar, Lorenzo Torresani, Kristen Grauman, and Christoph Feichtenhofer. Perceptionlm: Open-access data and models for detailed visual understanding. arXiv:2504.13180, 2025. 2, 5, 11, 14, 15, 16, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.438, + 0.887, + 0.466 + ], + "angle": 0, + "content": "[22] Seokju Cho, Heeseong Shin, Sunghwan Hong, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. CAT-Seg: Cost aggregation for open-vocabulary semantic segmentation. In CVPR, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.473, + 0.887, + 0.501 + ], + "angle": 0, + "content": "[23] Timothee Darcet, Maxime Oquab, Julien Mairal, and Piotr Bojanowski. Vision transformers need registers. In ICLR, 2024. 12, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.508, + 0.887, + 0.619 + ], + "angle": 0, + "content": "[24] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, Rodolphe Jenatton, Lucas Beyer, Michael Tschannen, Anurag Arnab, Xiao Wang, Carlos Riquelme, Matthias Minderer, Joan Puigcerver, Utku Evci, Manoj Kumar, Sjoerd van Steenkiste, Gamaleldin F. 
Elsayed, Aravindh Mahendran, Fisher Yu, Avital Oliver, Fantine Huot, Jasmijn Bastings, Mark Patrick Collier, Alexey Gritsenko, Vighnesh Birodkar, Cristina Vasconcelos, Yi Tay, Thomas Mensink, Alexander Kolesnikov, Filip Pavetic, Dustin Tran, Thomas Kipf, Mario Lučić, Xiaohua Zhai, Daniel Keysers, Jeremiah Harmsen, and Neil Houlsby. Scaling vision transformers to 22 billion parameters. In ICML, 2023. 1, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.626, + 0.887, + 0.751 + ], + "angle": 0, + "content": "[25] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weihs, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv:2409.17146, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.758, + 0.887, + 0.787 + ], + "angle": 0, + "content": "[26] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3, 6, 8, 9, 10, 30, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.793, + 0.887, + 0.822 + ], + "angle": 0, + "content": "[27] Karan Desai and Justin Johnson. VirTex: Learning visual representations from textual annotations. In CVPR, 2021. 
20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.829, + 0.887, + 0.856 + ], + "angle": 0, + "content": "[28] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.864, + 0.887, + 0.906 + ], + "angle": 0, + "content": "[29] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1, 8, 9" + }, + { + "type": "list", + "bbox": [ + 0.119, + 0.081, + 0.887, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.125 + ], + "angle": 0, + "content": "[30] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pre-training of large autoregressive image models. In ICML, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.131, + 0.887, + 0.175 + ], + "angle": 0, + "content": "[31] David Fan, Shengbang Tong, Jiachen Zhu, Koustuv Sinha, Zhuang Liu, Xinlei Chen, Michael Rabbat, Nicolas Ballas, Yann LeCun, Amir Bar, and Saining Xie. Scaling language-free visual representation learning. arXiv:2504.01017, 2025. 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.18, + 0.887, + 0.21 + ], + "angle": 0, + "content": "[32] Lijie Fan, Dilip Krishnan, Phillip Isola, Dina Katabi, and Yonglong Tian. Improving CLIP training with language rewrites. In NeurIPS, 2023. 
20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.215, + 0.887, + 0.245 + ], + "angle": 0, + "content": "[33] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. In ICLR, 2024. 1, 3, 9, 16, 20, 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.25, + 0.887, + 0.281 + ], + "angle": 0, + "content": "[34] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA: Exploring the limits of masked visual representation learning at scale. In CVPR, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.285, + 0.887, + 0.316 + ], + "angle": 0, + "content": "[35] Yuxin Fang, Quan Sun, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA-02: A visual representation for neon genesis. Image and Vision Computing, 2024. 1, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.32, + 0.887, + 0.338 + ], + "angle": 0, + "content": "[36] Christoph Feichtenhofer. X3D: Expanding architectures for efficient video recognition. In CVPR, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.342, + 0.887, + 0.399 + ], + "angle": 0, + "content": "[37] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T. Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders. In CVPR, 2025. 1, 2, 10, 11, 15, 16, 19, 20, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.404, + 0.887, + 0.462 + ], + "angle": 0, + "content": "[38] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. 
Video-MME: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv:2405.21075, 2024. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.467, + 0.887, + 0.554 + ], + "angle": 0, + "content": "[39] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, Eyal Orgad, Rahim Entezari, Giannis Daras, Sarah Pratt, Vivek Ramanujan, Yonatan Bitton, Kalyani Marathe, Stephen Mussmann, Richard Vencu, Mehdi Cherti, Ranjay Krishna, Pang Wei Koh, Olga Saukh, Alexander Ratner, Shuran Song, Hannaneh Hajishirzi, Ali Farhadi, Romain Beaumont, Sewoong Oh, Alex Dimakis, Jenia Jitsev, Yair Carmon, Vaishaal Shankar, and Ludwig Schmidt. DataComp: In search of the next generation of multimodal datasets. In NeurIPS, 2023. 10, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.558, + 0.887, + 0.589 + ], + "angle": 0, + "content": "[40] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.593, + 0.887, + 0.624 + ], + "angle": 0, + "content": "[41] Agrim Gupta, Piotr Dollar, and Ross Girshick. LVIS: A dataset for large vocabulary instance segmentation. In CVPR, 2019. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.629, + 0.887, + 0.659 + ], + "angle": 0, + "content": "[42] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.663, + 0.887, + 0.681 + ], + "angle": 0, + "content": "[43] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask R-CNN. In ICCV, 2017. 
11, 12, 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.685, + 0.887, + 0.716 + ], + "angle": 0, + "content": "[44] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022. 1, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.72, + 0.887, + 0.763 + ], + "angle": 0, + "content": "[45] Greg Heinrich, Mike Ranzinger, Hongxu, Yin, Yao Lu, Jan Kautz, Andrew Tao, Bryan Catanzaro, and Pavlo Molchanov. RADIOv2.5: Improved baselines for agglomerative vision foundation models. In CVPR, 2025. 1, 10, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.77, + 0.887, + 0.813 + ], + "angle": 0, + "content": "[46] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In ICCV, 2021. 3, 8, 9, 30, 31" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.818, + 0.887, + 0.849 + ], + "angle": 0, + "content": "[47] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In CVPR, 2021. 3, 4, 8, 9, 30, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.854, + 0.887, + 0.883 + ], + "angle": 0, + "content": "[48] Byeongho Heo, Song Park, Dongyoon Han, and Sangdoo Yun. Rotary position embedding for vision transformer. In ECCV, 2024. 20" + }, + { + "type": "list", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.883 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.081, + 0.885, + 0.11 + ], + "angle": 0, + "content": "[49] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. 
In NeurIPS Deep Learning Workshop, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.116, + 0.887, + 0.145 + ], + "angle": 0, + "content": "[50] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, 2016. 14, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.152, + 0.887, + 0.194 + ], + "angle": 0, + "content": "[51] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. OpenCLIP, 2021. 3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.201, + 0.885, + 0.23 + ], + "angle": 0, + "content": "[52] Allan Jabri, Andrew Owens, and Alexei Efros. Space-time correspondence as a contrastive random walk. In NeurIPS, 2020. 11, 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.236, + 0.885, + 0.265 + ], + "angle": 0, + "content": "[53] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. TGIF-QA: Toward spatio-temporal reasoning in visual question answering. In CVPR, 2017. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.272, + 0.885, + 0.314 + ], + "angle": 0, + "content": "[54] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.321, + 0.885, + 0.363 + ], + "angle": 0, + "content": "[55] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset. arXiv:1705.06950, 2017. 
6, 9, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.37, + 0.885, + 0.399 + ], + "angle": 0, + "content": "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 14, 15, 16, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.405, + 0.885, + 0.434 + ], + "angle": 0, + "content": "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.441, + 0.887, + 0.482 + ], + "angle": 0, + "content": "[58] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dolkar, and Ross Girshick. Segment anything. In ICCV, 2023. 5, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.489, + 0.885, + 0.518 + ], + "angle": 0, + "content": "[59] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV Workshop, 2013. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.525, + 0.885, + 0.567 + ], + "angle": 0, + "content": "[60] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 2017. 27, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.574, + 0.885, + 0.602 + ], + "angle": 0, + "content": "[61] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.609, + 0.885, + 0.638 + ], + "angle": 0, + "content": "[62] Hildegard Kuehne, Hueihan Jhuang, Estfbaliz Garrote, Tomaso Poggio, and Thomas Serre. HMDB: a large video database for human motion recognition. In ICCV, 2011. 9, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.644, + 0.885, + 0.673 + ], + "angle": 0, + "content": "[63] Weicheng Kuo, Yin Cui, Xiuye Gu, A. J. Piergiovanni, and Anelia Angelova. F-VLM: open-vocabulary object detection upon frozen vision and language models. In ICLR, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.679, + 0.885, + 0.721 + ], + "angle": 0, + "content": "[64] Zhengfeng Lai, Haotian Zhang, Bowen Zhang, Wentao Wu, Haoping Bai, Aleksei Timofeev, Xianzhi Du, Zhe Gan, Jiulong Shan, Chen-Nee Chuah, Yinfei Yang, and Meng Cao. VeCLIP: Improving CLIP training via visual-enriched captions. In ECCV, 2024. 5, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.728, + 0.885, + 0.757 + ], + "angle": 0, + "content": "[65] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? In NeurIPS, 2024. 27" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.764, + 0.885, + 0.792 + ], + "angle": 0, + "content": "[66] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. LLaVA-OneVision: Easy visual task transfer. TMLR, 2025. 16, 20, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.798, + 0.885, + 0.828 + ], + "angle": 0, + "content": "[67] Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In ICCV, 2023. 
9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.834, + 0.885, + 0.876 + ], + "angle": 0, + "content": "[68] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, Limin Wang, and Yu Qiao. MVBench: A comprehensive multi-modal video understanding benchmark. In CVPR, 2024. 14, 15, 16, 32" + }, + { + "type": "list", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.125 + ], + "angle": 0, + "content": "[69] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.131, + 0.857, + 0.147 + ], + "angle": 0, + "content": "[70] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for CLIP training. In NeurIPS, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.152, + 0.885, + 0.182 + ], + "angle": 0, + "content": "[71] Xianhang Li, Zeyu Wang, and Cihang Xie. CLIPA-v2: Scaling CLIP training with 81.1% zero-shot imagenet accuracy within a $10,000 budget; an extra $4,000 unlocks 81.8% accuracy. arXiv:2306.15658, 2023. 3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.187, + 0.885, + 0.217 + ], + "angle": 0, + "content": "[72] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. In ECCV, 2022. 11, 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.223, + 0.885, + 0.252 + ], + "angle": 0, + "content": "[73] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, 2023. 
14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.258, + 0.885, + 0.288 + ], + "angle": 0, + "content": "[74] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In CVPR, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.293, + 0.885, + 0.322 + ], + "angle": 0, + "content": "[75] Zhenyu Li, Xuyang Wang, Xianming Liu, and Junjun Jiang. Binsformer: Revisiting adaptive bins for monocular depth estimation. TIP, 2024. 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.328, + 0.885, + 0.37 + ], + "angle": 0, + "content": "[76] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In ECCV, 2014. 2, 6, 9, 12, 14, 15, 16, 19, 27, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.377, + 0.885, + 0.407 + ], + "angle": 0, + "content": "[77] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning,OCR, and world knowledge, 2024. 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.413, + 0.885, + 0.429 + ], + "angle": 0, + "content": "[78] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. NeurIPS, 2024. 20, 23" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.435, + 0.885, + 0.464 + ], + "angle": 0, + "content": "[79] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 3, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.469, + 0.885, + 0.499 + ], + "angle": 0, + "content": "[80] Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, and Baining Guo. Swin transformer v2: Scaling up capacity and resolution. In CVPR, 2022. 
19" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.504, + 0.885, + 0.534 + ], + "angle": 0, + "content": "[81] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.539, + 0.838, + 0.555 + ], + "angle": 0, + "content": "[82] AI @ Meta Llama Team. The llama 3 herd of models. arXiv:2407.21783, 2024. 5, 14, 15, 16, 20, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.561, + 0.771, + 0.577 + ], + "angle": 0, + "content": "[83] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. *ICLR*, 2019. 3, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.582, + 0.885, + 0.612 + ], + "angle": 0, + "content": "[84] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. CLIP4Clip: An empirical study of clip for end to end video clip retrieval. Neurocomputing, 2021. 6, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.618, + 0.885, + 0.647 + ], + "angle": 0, + "content": "[85] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. SiT: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In ECCV, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.653, + 0.885, + 0.682 + ], + "angle": 0, + "content": "[86] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-ChatGPT: Towards detailed video understanding via large vision and language models. In ACL, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.688, + 0.885, + 0.717 + ], + "angle": 0, + "content": "[87] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. VideoGPT+: Integrating image and video encoders for enhanced video understanding. arXiv:2406.09418, 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.723, + 0.885, + 0.752 + ], + "angle": 0, + "content": "[88] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arxiv:1306.5151, 2013. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.758, + 0.885, + 0.788 + ], + "angle": 0, + "content": "[89] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. NeurIPS, 2024. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.794, + 0.885, + 0.836 + ], + "angle": 0, + "content": "[90] Kevis-Kokitsi Maninis, Kaifeng Chen, Soham Ghosh, Arjun Karpur, Koert Chen, Ye Xia, Bingyi Cao, Daniel Salz, Guangxing Han, Jan Dlabal, et al. Tips: Text-image pretraining with spatial awareness. arXiv:2410.16512, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.843, + 0.885, + 0.872 + ], + "angle": 0, + "content": "[91] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. DocVQA: A dataset for vqa on document images. In WACV, 2021. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.878, + 0.885, + 0.907 + ], + "angle": 0, + "content": "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographics. In WACV, 2022. 
14, 15, 16, 32" + }, + { + "type": "list", + "bbox": [ + 0.119, + 0.081, + 0.887, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.081, + 0.887, + 0.152 + ], + "angle": 0, + "content": "[93] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, Anton Belyi, Haotian Zhang, Karanjeet Singh, Doug Kang, Ankur Jain, Hongyu He, Max Schwarzer, Tom Gunter, Xiang Kong, Aonan Zhang, Jianyu Wang, Chong Wang, Nan Du, Tao Lei, Sam Wiseman, Guoli Yin, Mark Lee, Zirui Wang, Ruoming Pang, Peter Grasch, Alexander Toshev, and Yinfei Yang. MM1: methods, analysis and insights from multimodal LLM pre-training. In ECCV, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.158, + 0.886, + 0.202 + ], + "angle": 0, + "content": "[94] Matthias Minderer, Alexey A. Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. Simple open-vocabulary object detection with vision transformers. In ECCV, 2022. 1, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.207, + 0.887, + 0.235 + ], + "angle": 0, + "content": "[95] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In NeurIPS, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.242, + 0.885, + 0.272 + ], + "angle": 0, + "content": "[96] Thao Nguyen, Samir Yitzhak Gadre, Gabriel Ilharco, Sewoong Oh, and Ludwig Schmidt. Improving multimodal datasets with image captioning. In NeurIPS, 2023. 5, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.278, + 0.885, + 0.306 + ], + "angle": 0, + "content": "[97] Maria-Elena Nilsback and Andrew Zisserman. 
Automated flower classification over a large number of classes. In ICVGIP, 2008. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.313, + 0.887, + 0.383 + ], + "angle": 0, + "content": "[98] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Mido Assran, Nicolas Ballas, Wojciech Galuba, Russell Howes, Po-Yao Huang, Shang-Wen Li, Ishan Misra, Michael Rabbat, Vasu Sharma, Gabriel Synnaeve, Hu Xu, Hervé Jégou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision. TMLR, 2024. 1, 2, 10, 11, 15, 16, 18, 19, 20, 22, 29, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.389, + 0.887, + 0.418 + ], + "angle": 0, + "content": "[99] Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, and Philipp Krahenbuhl. NMSstrikes back. arXiv:2212.06137, 2022. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.425, + 0.885, + 0.44 + ], + "angle": 0, + "content": "[100] Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In CVPR, 2012. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.446, + 0.885, + 0.475 + ], + "angle": 0, + "content": "[101] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. arXiv:2306.14824, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.482, + 0.885, + 0.525 + ], + "angle": 0, + "content": "[102] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Kenji Kawaguchi, Hanxiao Liu, Adams Wei Yu, Jiahui Yu, Yi-Ting Chen, Minh-Thang Luong, Yonghui Wu, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. Neurocomputing, 2023. 
1, 9, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.53, + 0.885, + 0.572 + ], + "angle": 0, + "content": "[103] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV, 2015. 27, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.58, + 0.885, + 0.609 + ], + "angle": 0, + "content": "[104] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 DAVIS challenge on video object segmentation. arXiv:1704.00675, 2017. 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.615, + 0.885, + 0.685 + ], + "angle": 0, + "content": "[105] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adriâ Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, Carl Doersch, Tatiana Matejovicova, Yury Sulsky, Antoine Miech, Alex Frechette, Hanna Klimczak, Raphael Koster, Junlin Zhang, Stephanie Winkler, Yusuf Aytar, Simon Osindero, Dima Damen, Andrew Zisserman, and João Carreira. Perception test: A diagnostic benchmark for multimodal video models. In NeurIPS, 2024. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.691, + 0.885, + 0.735 + ], + "angle": 0, + "content": "[106] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 3, 8, 9, 15, 16, 19, 20, 31, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.74, + 0.885, + 0.782 + ], + "angle": 0, + "content": "[107] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. 
An empirical study of autoregressive pre-training from videos. arXiv:2501.05453, 2025. 19, 20, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.79, + 0.885, + 0.819 + ], + "angle": 0, + "content": "[108] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv:2204.06125, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.824, + 0.885, + 0.854 + ], + "angle": 0, + "content": "[109] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In ICCV, 2021, 11, 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.86, + 0.885, + 0.889 + ], + "angle": 0, + "content": "[110] Mike Ranzinger, Greg Heinrich, Jan Kautz, and Pavlo Molchanov. AM-RADIO: Agglomerative vision foundation model—reduce all domains into one. In CVPR, 2024. 1, 18, 21" + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.081, + 0.887, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.138 + ], + "angle": 0, + "content": "[111] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. SAM 2: Segment anything in images and videos. In ICLR, 2024. 2, 5, 17, 18, 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.144, + 0.885, + 0.174 + ], + "angle": 0, + "content": "[112] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet? In ICML, 2019. 
3, 6, 8, 9, 30, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.18, + 0.885, + 0.222 + ], + "angle": 0, + "content": "[113] William A. Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. The dollar street dataset: images representing the geographic and socioeconomic diversity of the world. In NeurIPS Datasets and Benchmarks, 2022. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.228, + 0.885, + 0.258 + ], + "angle": 0, + "content": "[114] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.265, + 0.885, + 0.293 + ], + "angle": 0, + "content": "[115] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In ECCV, 2020. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.3, + 0.885, + 0.328 + ], + "angle": 0, + "content": "[116] Mert Bulent Sariyildiz, Philippe Weinzaepfel, Thomas Lucas, Diane Larlus, and Yannis Kalantidis. UNIC: Universal classification models via multi-teacher distillation. In ECCV, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.334, + 0.885, + 0.391 + ], + "angle": 0, + "content": "[117] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In NeurIPS Datasets and Benchmarks, 2022. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.397, + 0.885, + 0.427 + ], + "angle": 0, + "content": "[118] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. 
A-OKVQA: A benchmark for visual question answering using world knowledge. In ECCV, 2022. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.433, + 0.885, + 0.462 + ], + "angle": 0, + "content": "[119] Jinghuan Shang, Karl Schmeckpeper, Brandon B May, Maria Vittoria Minniti, Tarik Kelestemur, David Watkins, and Laura Herlant. Theia: Distilling diverse vision foundation models for robot learning. In CoRL, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.468, + 0.885, + 0.497 + ], + "angle": 0, + "content": "[120] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In ICCV, 2019. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.503, + 0.885, + 0.533 + ], + "angle": 0, + "content": "[121] Shashank Shekhar, Florian Bordes, Pascal Vincent, and Ari Morcos. Objectives matter: Understanding the impact of self-supervised objectives on vision transformer representations. arXiv:2304.13089, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.538, + 0.885, + 0.567 + ], + "angle": 0, + "content": "[122] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In ECCV, 2020. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.574, + 0.885, + 0.603 + ], + "angle": 0, + "content": "[123] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.609, + 0.885, + 0.638 + ], + "angle": 0, + "content": "[124] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.644, + 0.885, + 0.673 + ], + "angle": 0, + "content": "[125] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards VQA models that can read. In CVPR, 2019. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.679, + 0.885, + 0.708 + ], + "angle": 0, + "content": "[126] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv:1212.0402, 2012. 9, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.714, + 0.885, + 0.744 + ], + "angle": 0, + "content": "[127] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. Neurocomputing, 2024. 4, 20, 25" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.75, + 0.885, + 0.779 + ], + "angle": 0, + "content": "[128] Lin Sun, Jiale Cao, Jin Xie, Xiaoheng Jiang, and Yanwei Pang. CLIPer: Hierarchically improving spatial representation of CLIP for open-vocabulary semantic segmentation. arXiv:2411.13836, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.785, + 0.885, + 0.814 + ], + "angle": 0, + "content": "[129] Quan Sun, Yuxin Fang, Ledell Wu, Xinlong Wang, and Yue Cao. EVA-CLIP: Improved training techniques for clip at scale. arXiv:2303.15389, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.82, + 0.885, + 0.849 + ], + "angle": 0, + "content": "[130] Quan Sun, Jinsheng Wang, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, and Xinlong Wang. EVA-CLIP-18B: Scaling clip to 18 billion parameters. arXiv:2402.04252, 2024. 1, 9, 10, 20, 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.855, + 0.885, + 0.884 + ], + "angle": 0, + "content": "[131] Mingxing Tan and Quoc Le. EfficientNet: Rethinking model scaling for convolutional neural networks. In ICML, 2019. 
1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.891, + 0.649, + 0.906 + ], + "angle": 0, + "content": "[132] Gemma Team. Gemma 3 technical report. arXiv:2503.19786, 2025. 16, 20" + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.081, + 0.885, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.11 + ], + "angle": 0, + "content": "[133] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100M: The new data in multimedia research. Communications of the ACM, 2016. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.117, + 0.885, + 0.159 + ], + "angle": 0, + "content": "[134] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Ziteng Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024. 11, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.166, + 0.885, + 0.195 + ], + "angle": 0, + "content": "[135] Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In ICCV, 2021. 14, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.201, + 0.815, + 0.216 + ], + "angle": 0, + "content": "[136] Hugo Touvron, Matthieu Cord, and Hervé Jégou. DeiT III: Revenge of the ViT. In ECCV, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.223, + 0.885, + 0.251 + ], + "angle": 0, + "content": "[137] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. In NeurIPS, 2023. 
1, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.258, + 0.885, + 0.314 + ], + "angle": 0, + "content": "[138] Michael Tschannen, Alexey Gritsenko, Xiao Wang, Muhammad Ferjad Naeem, Ibrahim Alabdulmohsin, Nikhil Parthasarathy, Talfan Evans, Lucas Beyer, Ye Xia, Basil Mustafa, Olivier Henaff, Jeremiah Harmsen, Andreas Steiner, and Xiaohua Zhai. SigLIP 2: Multilingual vision-language encoders with improved semantic understanding, localization, and dense features. arXiv:2502.14786, 2025. 2, 7, 8, 9, 10, 15, 16, 18, 19, 26, 32, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.321, + 0.885, + 0.362 + ], + "angle": 0, + "content": "[139] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating CLIP-style models on dense captions. In CVPR, 2024. 27, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.37, + 0.885, + 0.399 + ], + "angle": 0, + "content": "[140] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.405, + 0.885, + 0.433 + ], + "angle": 0, + "content": "[141] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 25" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.441, + 0.885, + 0.469 + ], + "angle": 0, + "content": "[142] Matthew Walmer, Saksham Suri, Kamal Gupta, and Abhinav Shrivastava. Teaching matters: Investigating the role of supervision in vision transformers. In CVPR, 2023. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.476, + 0.885, + 0.504 + ], + "angle": 0, + "content": "[143] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. 
Learning robust global representations by penalizing local predictive power. In NeurIPS, 2019. 3, 8, 9, 30, 31" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.511, + 0.885, + 0.567 + ], + "angle": 0, + "content": "[144] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv:2409.12191, 2024. 16, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.574, + 0.885, + 0.616 + ], + "angle": 0, + "content": "[145] Wenhai Wang, Jifeng Dai, Zhe Chen, Zhenhang Huang, Zhiqi Li, Xizhou Zhu, Xiaowei Hu, Tong Lu, Lewei Lu, Hongsheng Li, Xiaogang Wang, and Yu Qiao. InternImage: Exploring large-scale vision foundation models with deformable convolutions. In CVPR, 2023. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.623, + 0.885, + 0.665 + ], + "angle": 0, + "content": "[146] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, Tianxiang Jiang, Songze Li, Jilan Xu, Hongjie Zhang, Yifei Huang, Yu Qiao, Yali Wang, and Limin Wang. InternVideo2: Scaling foundation models for multimodal video understanding. In ECCV, 2024. 2, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.672, + 0.885, + 0.701 + ], + "angle": 0, + "content": "[147] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, 2022. 4, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.707, + 0.885, + 0.736 + ], + "angle": 0, + "content": "[148] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. STAR: A benchmark for situated reasoning in real-world videos. In NeurIPS, 2021. 
14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.743, + 0.838, + 0.758 + ], + "angle": 0, + "content": "[149] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detector2, 2019. 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.764, + 0.885, + 0.792 + ], + "angle": 0, + "content": "[150] Jianxiong Xiao, Krista A. Ehinger, James Hays, Antonio Torralba, and Aude Oliva. SUN database: Exploring a large collection of scene categories. IJCV, 2014. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.799, + 0.885, + 0.841 + ], + "angle": 0, + "content": "[151] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen tau Yih, Shang-Wen Li, Saining Xie, and Christoph Feichtenhofer. Altogether: Image captioning via re-aligning alt-text. In EMNLP, 2024. 5, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.848, + 0.885, + 0.89 + ], + "angle": 0, + "content": "[152] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. In ICLR, 2024. 1, 3, 8, 15, 19, 20, 32, 33" + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.11 + ], + "angle": 0, + "content": "[153] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. MSR-VTT: A large video description dataset for bridging video and language. In CVPR, 2016. 
6, 7, 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.116, + 0.887, + 0.228 + ], + "angle": 0, + "content": "[154] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arxiv:2407.10671, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.235, + 0.887, + 0.319 + ], + "angle": 0, + "content": "[155] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv:2412.15115, 2024. 16, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.326, + 0.885, + 0.368 + ], + "angle": 0, + "content": "[156] Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In ICLR, 2020. 
3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.375, + 0.885, + 0.404 + ], + "angle": 0, + "content": "[157] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. TACL, 2014. 9, 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.41, + 0.885, + 0.439 + ], + "angle": 0, + "content": "[158] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. CoCa: Contrastive captioners are image-text foundation models. TMLR, 2022. 1, 9, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.446, + 0.885, + 0.487 + ], + "angle": 0, + "content": "[159] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. In ICLR, 2025, 20, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.495, + 0.885, + 0.523 + ], + "angle": 0, + "content": "[160] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1, 4, 7, 9, 16, 19, 20, 22, 25, 26, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.53, + 0.885, + 0.558 + ], + "angle": 0, + "content": "[161] Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M Ni, and Heung-Yeung Shum. DINO: DETR with improved denoising anchor boxes for end-to-end object detection. In ICLR, 2023. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.565, + 0.804, + 0.58 + ], + "angle": 0, + "content": "[162] Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, 2016. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.587, + 0.885, + 0.615 + ], + "angle": 0, + "content": "[163] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D. Manning, and Curtis P. Langlotz. 
Contrastive learning of medical visual representations from paired images and text. In MLHC, 2022. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.622, + 0.885, + 0.677 + ], + "angle": 0, + "content": "[164] Long Zhao, Nitesh Bharadwaj Gundavarapu, Liangzhe Yuan, Hao Zhou, Shen Yan, Jennifer J. Sun, Luke Friedman, Rui Qian, Tobias Weyand, Yue Zhao, Rachel Hornung, Florian Schroff, Ming Yang, David A. Ross, Huisheng Wang, Hartwig Adam, Mikhail Sirotenko, Ting Liu, and Boqing Gong. VideoPrism: A foundational visual encoder for video understanding. In ICML, 2024. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.685, + 0.885, + 0.713 + ], + "angle": 0, + "content": "[165] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. In WACV, 2025. 14, 15, 16, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.72, + 0.885, + 0.748 + ], + "angle": 0, + "content": "[166] Liang Zheng, Yali Zhao, Shengjin Wang, Jingdong Wang, and Qi Tian. Good practice in cnn feature transfer. arXiv:1604.00133, 2016. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.755, + 0.885, + 0.783 + ], + "angle": 0, + "content": "[167] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In CVPR, 2017. 
19, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.79, + 0.885, + 0.888 + ], + "angle": 0, + "content": "[168] Jinguo Zhu, Weiyun Wang, Zhe Chen, Zhaoyang Liu, Shenglong Ye, Lixin Gu, Yuchen Duan, Hao Tian, Weijie Su, Jie Shao, Zhangwei Gao, Erfei Cui, Yue Cao, Yangzhou Liu, Weiye Xu, Hao Li, Jiahao Wang, Han Lv, Dengnian Chen, Songze Li, Yinan He, Tan Jiang, Jiapeng Luo, Yi Wang, Conghui He, Botian Shi, Xingcheng Zhang, Wenqi Shao, Junjun He, Yingtong Xiong, Wenwen Qu, Peng Sun, Penglong Jiao, Lijun Wu, Kaipeng Zhang, Huipeng Deng, Jiaye Ge, Kai Chen, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. InternVL3: Exploring advanced training and test-time recipes for open-source multimodal models. arxiv:2504.10479, 2025. 2, 16" + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.081, + 0.887, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.082, + 0.885, + 0.109 + ], + "angle": 0, + "content": "[169] Zhuofan Zong, Guanglu Song, and Yu Liu. DETRs with collaborative hybrid assignments training. In ICCV, 2023. 
19" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "44" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_origin.pdf b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..736f9d44ee4b39d9d539cbaac24ab41f163243ca --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/535711ee-09a2-4abd-b47d-e22a9c259d17_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a41a54b160be4e7d3a907163cab0e71b26e7cd344577e9bb96f3b57b7b41e4e3 +size 7826716 diff --git a/data/2025/2504_13xxx/2504.13181/full.md b/data/2025/2504_13xxx/2504.13181/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3273da4220350efe9af31a236390d565e49178cb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/full.md @@ -0,0 +1,1062 @@ +# Perception Encoder: The best visual embeddings are not at the output of the network + +Daniel Bolya $^{1,\ast}$ , Po-Yao Huang $^{1,\ast}$ , Peize Sun $^{1,\ast}$ , Jang Hyun Cho $^{1,2,\ast,\dagger}$ , Andrea Madotto $^{1,\ast}$ , Chen Wei $^{1}$ , Tengyu Ma $^{1}$ , Jiale Zhi $^{1}$ , Jathushan Rajasegaran $^{1}$ , Hanoona Rasheed $^{3,\dagger}$ , Junke Wang $^{4,\dagger}$ , Marco Monteiro $^{1}$ , Hu Xu $^{1}$ , Shiyu Dong $^{5}$ , Nikhila Ravi $^{1}$ , Daniel Li $^{1}$ , Piotr Dólár $^{1}$ , Christoph Feichtenhofer $^{1}$ + +$^{1}$ Meta FAIR, $^{2}$ UT Austin, $^{3}$ MBZUAI, $^{4}$ Fudan University, $^{5}$ Meta Reality Labs *Joint first author, †Work done during internships at Meta + +We introduce Perception Encoder (PE), a state-of-the-art vision encoder for image and video understanding trained via simple vision-language learning. 
Traditionally, vision encoders have relied on a variety of pretraining objectives, each tailored to specific downstream tasks such as classification, captioning, or localization. Surprisingly, after scaling our carefully tuned image pretraining recipe and refining with our robust video data engine, we find that contrastive vision-language training alone can produce strong, general embeddings for all of these downstream tasks. There is only one caveat: these embeddings are hidden within the intermediate layers of the network. To draw them out, we introduce two alignment methods: language alignment for multimodal language modeling, and spatial alignment for dense prediction. Together, our PE family of models achieves best-in-class results on a wide variety of tasks, including (1) zero-shot image and video classification and retrieval, simultaneously obtaining 86.6 average zero-shot ImageNet robustness and 76.9 zero-shot Kinetics-400 video classification; (2) document, image, and video Q&A, enabling 94.6 DocVQA, 80.9 InfographicVQA, and 82.7 PerceptionTest with an 8B LLM; and (3) spatial tasks such as detection, tracking, and depth estimation, setting a new COCO state-of-the-art of 66.0 box mAP. To foster further research, we release our models, code, and novel dataset of synthetically and human-annotated videos. + +Code: https://github.com/facebookresearch/perception_models + +Dataset: https://ai.meta.com/datasets/pe-video/ + +Meta + +# 1 Introduction + +For the last decade in computer vision, pretrained vision encoders have been the core building block for most applications requiring perception. From million-scale ImageNet [26] pretrained convolutional networks [42, 61, 81, 124, 131] to billion-scale web-pretrained transformers [19, 24, 29, 33, 54, 102, 130, 152, 158], the dominant strategy in vision has consistently been to adapt large-scale pretrained encoders to downstream tasks. 
+ +There are many pretraining objectives today, each with distinct characteristics and each yielding representations better suited for specific tasks: vision-language contrastive losses [106, 160] learn a global vision and language embedding well-suited for zero-shot classification and retrieval as well as provide vision-language alignment for open-world [69, 94] and generative tasks [108, 114]; captioning losses [37, 137] learn to predict image descriptions using a language decoder, which transfers well to downstream multimodal language model (MLLM) tasks; and spatially self-supervised losses [44, 98] learn dense spatial correspondences without language supervision, making them useful for tasks requiring precise localization like object detection. + +Many works are now attempting to combine two or more of these techniques in different ways [19, 34, 35, 37, 45, 90, 110, 158]. While many have been successful, the complexity of these strategies grows exponentially with number of use cases, which can make scaling difficult. There has not yet been shown a single, simple, and easily scalable pretraining technique that can learn state-of-the-art features for all downstream tasks. + +In this work we discover that global vision-language contrastive learning alone can be one such approach. After building a state-of-the-art contrastive model for image and video, we found a surprising result: inside the model were specific features aligned to OCR, VQA, grounding, detection, depth estimation, and tracking. Compared to the state-of-the-art models with captioning [37] and spatially self-supervised [98] pretraining, our + +![](images/0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg) +Figure 1 Perception Encoder (PE) is a family of large-scale vision encoder models with state-of-the-art performance on a large variety of vision tasks. 
By using a robust contrastive pretraining recipe and finetuning on synthetically aligned videos, PE not only outperforms all existing models on classification and retrieval (§2), but it also internally produces strong, general features that scale for downstream tasks (§3). PE unlocks the ability for large-scale contrastive pretraining to transfer to downstream tasks with alignment tuning to capitalize on those general features (§4, §5). + +contrastive encoder has specific layers that, when used as frozen features, matches or exceeds the performance of the other two pretraining techniques on tasks they should be the best at. The only problem is—these features exist at different layers for each task. By exploiting this phenomenon with alignment tuning, we show it is possible to align these features to the end of the network in order to create state-of-the-art encoders for downstream MLLM and spatial tasks—all following the same easily scalable contrastive pretraining. + +We begin by building $\mathrm{PE}_{\mathrm{core}}$ (Fig. 1, left), a large-scale contrastively pretrained model with state-of-the-art zero-shot performance on both images and video ( $\S 2$ ). To accomplish this, we first focus on developing a strong image-only contrastive pretraining recipe to extract general knowledge from billion-scale image-text data. Keeping the data and training FLOPs fixed, this recipe significantly improves upon vanilla CLIP in both absolute performance and robustness ( $\S 2.1$ ). We then use the resulting model as a frame-based encoder to develop a video data engine for generating well-aligned video captions. Finetuning on this synthetic video-text data substantially improves performance on both image and video classification and retrieval tasks ( $\S 2.2$ ). Motivated by this success, we release a large portion of the data used to train the engine: PE Video Dataset (PVD), consisting of 1M diverse videos with 120K human-refined annotations ( $\S 2.3$ ). 
Finally, we scale our robust image pretraining and well-aligned video finetuning strategy to 2B parameters to produce $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ ( $\S 2.4$ ), a single unified encoder that outperforms SigLIP2 [138] on zero-shot image tasks and InternVideo2 [146] on most zero-shot video tasks. We further transfer this power to smaller model scales through distillation. + +With the strongest image and video recognition model in hand, we shift our focus to downstream tasks. Remarkably, despite being pretrained with CLIP loss, we find that the intermediate layers of $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ can rival AIMv2-3B [37] on language tasks and DINOv2-g [98] on spatial tasks, both of which among the strongest pretrained models in their respective domains. Upon investigation, we attribute this capability to our robust image pretraining strategy, which appears to have unlocked the potential of contrastive pretraining to scale effectively for downstream tasks (§3). However, a challenge remains: the model does not naturally output these features, keeping them hidden internally. To address this, we introduce two alignment tuning methods (Fig. 1, right) to extract these strong, general features. + +First, in §4, we investigate the most effective technique to align features to the end of the network by adapting to a large language model. This language alignment enables us to construct $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ , which individually outperforms all other popular vision encoders for MLLM tasks. Moreover, when paired with our Perception Language Model (PLM) [21], the combination rivals the latest state-of-the-art MLLMs, like InternVL3 [168] + +Second, in §5, we identify a dichotomy in the layers optimal for spatial tasks. 
By visualizing the features and pinpointing the explicit reason for this dichotomy, we develop a straightforward spatial alignment approach: distilling from the model's own frozen features to achieve most of the alignment, complemented by a novel use of SAM 2 [111] for spatial correspondence distillation to refine the process. The resulting $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ not only outperforms other popular models in depth estimation, tracking, and semantic segmentation, but also sets a new absolute state-of-the-art on COCO [76] detection with a much simpler decoder. + +With this family of checkpoints, Perception Encoder unlocks the potential to scale one simple pretraining method to solve many downstream vision tasks. We are releasing our models, code, and PE Video Dataset. + +# 2 Perception Encoder: Core + +To build Perception Encoder (PE), we start by training a large-scale, robust, and highly performant vision-language contrastive model for image and video. We have two objectives: first, to enhance the scalability and data efficiency of contrastive training; and second, to create a unified model effective on both image and video. + +These goals are somewhat conflicting: image-text data is plentiful and training on images is efficient, but video-text data is scarce and video training is expensive. Thus, we decouple image and video training into two stages. We first develop a strong image pretraining recipe (§2.1) with several regularization techniques to create a robust starting point. Then we use the resulting image model as a frame encoder to develop a video data engine (§2.2) supported by our novel human-refined video-text dataset (§2.3) to generate aligned captions for video clips. Finally, we finetune the image encoder on the resulting aligned video data (§2.4). Using our data engine design, this short finetuning step substantially improves both image and video performance. 
+ +# 2.1 Robust Image Pretraining + +In the first stage of pretraining, we want to learn as much visual information as possible from a large set of image-text data. Notably, a unique quirk of contrastive training is the loss for a given sample depends on the other samples in the batch. Because each batch is different, there is potential to learn new information every time an example is sampled, even if that sample has been seen before. Thus, we find contrastive learning to benefit from a long training schedule. To exploit this, we design our pretraining recipe with high regularization, stability, and training efficiency in mind. + +Setup. (Fig. 2.1) We track our changes on a vanilla CLIP model using an OpenCLIP [51] ViT-L/14 model at 224 resolution as a baseline. We keep the training budget fixed to around 1T GFLOPs (i.e., a ZFLOP), and train on a fixed 2.3B image-text dataset curated using the MetaCLIP [152] text-only curation pipeline. For the baseline, we use a global batch size of $32\mathrm{K}$ , class token, AdamW [83], and train for 12B samples seen. To assess the generality of the information learned during pretraining, we report not only zero-shot ImageNet val [26] results but also the average performance across a range of robustness metrics, including ImageNet val [26], ImageNet v2 [112], ObjectNet [4], ImageNet Adversarial [47], ImageNet Rendition [46], and ImageNet Sketch [143]. As observed with other pure CLIP models [33, 106, 152], the average robustness metric performance of this vanilla recipe is much lower than ImageNet val alone. + +![](images/ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg) +Figure 2 Robust Image Pretraining. We tune our pretraining recipe (§2.1) to maximize performance on a fixed set of data, starting with an OpenCLIP [51] ViT-L/14 model. We report cumulative zero-shot classification results for each modification. 
The inner bars show robustness evaluation, calculated as the average of 6 robustness benchmarks [4, 26, 46, 47, 112, 143], and the outer bars show ImageNet val [26] alone. Several changes significantly improve robustness, indicating that ImageNet val scales more with data, while robustness can scale with refined training techniques. + +Progressive Resolution. (Fig. 2.2) To enable longer training, we first improve training efficiency. As shown in many works [70, 71, 79, 131, 136], vision encoders work well with a progressively increasing resolution schedule. Thus, we halve the training FLOPs while maintaining performance by evenly splitting the baseline 12B-sample run into 98, 154, and 224 resolution stages, with 4B samples per stage. + +Increasing Batch Size. (Fig. 2.3) We use the extra budget to double the batch size from $32\mathrm{K}$ to $64\mathrm{K}$ , increasing the total samples seen from 12B to 24B. Larger batch size means a higher likelihood for there to be a non-trivially novel pair of samples, i.e., hard negatives. This is akin to increasing the "task difficulty" of CLIP and improves ImageNet val by $+0.6\%$ and robustness by double of that, $+1.1\%$ . + +LAMB Optimizer. (Fig. 2.4) We switch from AdamW to LAMB [156], which is known to stabilize large batch training. More importantly, LAMB allows us to train stably with a higher learning rate of $2 \times 10^{-3}$ compared + +to the original $5 \times 10^{-4}$ . We observe that starting with a high learning rate is important to allow the model to adapt to different resolutions. These factors combine for $+0.4\%$ on ImageNet val and $+0.7\%$ on robustness. + +Increasing Final Resolution. (Fig. 2.5) A classic finding is that parameters and resolution should be scaled together [36, 131]. Thus, we add a fourth 336 resolution stage at the end of training. To keep the training FLOPs the same, we adjust the training schedule to 10B samples at 98 resolution, 8B at 154, 4B at 224, and 2B at 336. 
While ImageNet val only increases by $+0.5\%$ , robustness improves threefold, rising by $+1.4\%$ . + +RoPE. (Fig. 2.6) We add 2D RoPE [127] to each attention layer to improve extrapolation, keeping the original position embedding. 2D RoPE only improves ImageNet val by $+0.3\%$ but enhances robustness by $+0.9\%$ . + +Attention Pooling. (Fig. 2.7) We follow [160] in constructing the CLIP embedding using an attention probing transformer block. Surprisingly, we found keeping the class token as an input to this block is important for small model performance. Together, this improves ImageNet val by $+0.3\%$ and robustness by $+0.9\%$ . + +Tuned Data Augmentation. (Fig. 2.8) Despite training on billions of samples, we find data augmentation still important—especially for transfer to unlikely scenarios like in ObjectNet [4]. We add heavy random cropping, brightness/saturation jitter, and horizontal flip. Random cropping encourages using the entire caption, as not everything is in frame. Jitter helps low-light settings and documents. Horizontal flip improves natural images and does not hurt OCR (see §2.5). These improve robustness by $+0.7\%$ , notably, ObjectNet by $+2.4\%$ . + +Mask Regularization. (Fig. 2.9) As regularization, we want the model to produce the same features if some patches are not visible. However, passing the CLIP gradients through masked images may negatively alter behavior on unmasked images. Thus, we convert MaskFeat [147] into a regularization loss by duplicating and masking 1/16th of the batch. At the output, the masked tokens are aligned to their unmasked counterparts by maximizing cosine similarity. Care is taken to ensure that the CLIP and masked gradients are disjoint. + +Scaling Behavior. (Figs. 3 and 4) In Fig. 3, we show the performance of our recipe (Fig. 2.9) vs. the original CLIP recipe (Fig. 2.1) across S/14, B/14, and L/14 models. For each benchmark, our recipe scales around the same rate or better than the original CLIP recipe. 
On some difficult datasets like ObjectNet [4] and ImageNet Adversarial [47], our recipe shows distinctly better scaling. This indicates that the improvements in performance were not at the cost of scalability, meaning we can further benefit from scaling the model size. 

![](images/248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg) 
Figure 3 Scaling Behavior (Model Size). Results before and after our recipe changes (Fig. 2) for S/14, B/14, and L/14 models. Our recipe improves scaling for difficult metrics like ObjectNet [4] and ImageNet Adversarial [47]. 

![](images/d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg) 

![](images/49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg) 

![](images/fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg) 

![](images/3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg) 

![](images/2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg) 

In Fig. 4, we additionally show the performance of our recipe vs. the original CLIP recipe across L/14 models trained with 120K steps (one-third schedule), 240K steps (two-thirds schedule), and 360K steps (full ablation schedule). All models are their own training runs with full learning rate annealing and the progressive resolution schedule adjusted proportionally. We see nearly linear trends for our recipe on most datasets. This suggests we can train longer for more performance, even at L scale and with 24B samples seen already. 

![](images/553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg) 
Figure 4 Scaling Behavior (Training Steps). Results before and after our recipe changes for an L/14 model trained with 120K, 240K, and 360K steps, adjusting the learning rate and progressive resolution schedules accordingly. Despite our recipe being much stronger than the original, there is still room for further improvement by training longer. 
+ +![](images/4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg) + +![](images/82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg) + +![](images/f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg) + +![](images/1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg) + +![](images/9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg) + +# 2.2 Bootstrapping a Video Data Engine with Perception Encoder + +With a robust image pretraining recipe settled and its scaling behavior confirmed, our next step is to extend the image-only encoder to accommodate video and build a unified image-video model. Unlike web-scale image-text data, which comes in many cases with human-generated descriptive alt-text information, videos with aligned language annotation are inherently scarce. High-quality human-annotated captions for videos are even rarer. This scarcity presents a unique and significant challenge in training encoders capable of effectively processing video inputs. + +![](images/16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg) +Figure 5 Video Data Engine. To create aligned video-text data for contrastive training, we use a PE-based video captioner [21] to generate a holistic video caption and an image-level captioner [82] on sampled frames. We then provide those captions as well as the original video metadata to text-only LLM [82] to synthesize a single short, aligned caption optimal for contrastive training. + +Inspired by the recent success of image data engines [58, 64, 96, 111, 151], we extend this concept to develop a robust video data engine that generates well-aligned synthetic captions for a diverse set of videos, facilitating the training of a video encoder. This innovative approach represents the first large-scale exploration of its kind. In the following sections, we introduce the process of building our video data engine. 
+ +To bootstrap our contrastive video finetuning, we focus on synthesizing video captions. We build our data engine in three stages: (1) we create a strong baseline video captioner, which we call the Perception Language Model (PLM), described in [21]; (2) we add additional high quality video data with human-refined captions to further enhance the captioner's quality; (3) we refine and summarize the generated video captions with an LLM to construct a large video dataset to use for the contrastive video finetuning of our Perception Encoder. + +Phase 1: Base Video Captioner (PLM). We build our data engine on an early version of PLM [21], a multimodal large language model with PE as the vision encoder and Llama [82] as the language decoder. We train PLM on a large-scale collection of open-access image and video datasets [21]. In total, the training dataset consists of 64.7M images and videos covering natural images, charts, documents, exocentric and egocentric videos. + +Phase 2: PLM + Refined Data. To further boost captioning performance, we collect a set of 265K videos (105K from PVD which we release, see §2.3), caption them with our base PLM model, and ask human raters to refine the captions1. We then fine-tune our base PLM model with this data, significantly improving captioning quality (see Tab. 1). + +
CaptionerAuroraCap [13]VCG Diverse [87]VCG Bench [86] Score
ScoreAccScoreAcc
PLM2.251.93.165.134.3
PLM + Human-Refined Data3.471.13.679.435.2
+ +Table 1 Video Captioning. We use an early version of PLM-8B [21], consisting of our image-only PE encoder and a Llama decoder, for captioning. Adding human-refined data greatly boosts captioning performance (higher is better). + +Phase 3: LLM Summarization. We synthesize the final aligned video captions by incorporating the PLM video captions, Llama 3.2 [82] image-only frame captions, and the existing video metadata of video titles and descriptions (Fig. 5). Similar to image alt-text, video metadata contains knowledge often not covered by the image and video captioning models. Thus, combining the two leads to more comprehensive captions. We summarize video captions, frame captions, and video metadata together using the Llama 3.3 70B model to provide the final captions. The prompt used to generate the summary can be found in Appendix A.1. + +Using the Engine. Finally, we use the resulting data engine bootstrapped with an image-only checkpoint of PE to generate well-aligned, information-dense captions for a diverse set of 22M videos for contrastive finetuning. + +Training with Recaptioned Videos. Our goal is to develop a unified image and video encoder. To encode videos using our existing image encoder, we uniformly sample $N = 8$ frames from video clips and extract frame-level + +embeddings with the image encoder. We then apply average pooling over these frame embeddings to obtain video embeddings, which are used for contrastive learning with encoded video captions by the text encoder. Despite being extremely simple, we find this technique surprisingly effective in producing a strong joint image-video encoder. We share this finding with previous studies [19, 84], which note that simple average pooling outperforms more complex pooling strategies like attention-based compression for video. + +Ablations. In Tab. 
2, we conduct an ablation study on the components of the video data engine by finetuning an intermediate image-only checkpoint on 17M of the 22M videos recaptioned by our video data engine. The results show that the video data engine significantly enhances zero-shot classification and retrieval performance for both image and video benchmarks, compared to the image-only baseline encoder (first row). Notably, using the video data engine's video-level and frame-level captions provides significant improvements over relying solely on metadata such as video title and description (second row), highlighting the importance of building a robust video data engine to compensate for noise in web videos. + +
TitleDescriptionVideo CaptionFrame CaptionAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet vial [26]ImageNet vial [12]ObjectNet IV Classes [4]MS-COCO mrt→img [76]MS-COCO mrt→mrt [76]Average VideoKinetics 400 [55]Kinetics 400 [55]MSR-VTT mrt→vid [153]MSR-VTT mrt→mrt [153]48.1
72.683.377.885.849.466.850.969.768.438.027.3
75.483.278.287.147.366.056.074.173.539.037.3
78.283.578.486.856.074.360.973.873.447.648.8
✓*78.183.779.087.754.173.060.975.475.146.746.5
78.283.779.087.554.673.261.675.875.547.448.1
+ +Table 2 Video Data Engine Ablation. We ablate our video data engine in Fig. 5 by finetuning on an in-development image-only version of PE by averaging the frame embeddings to create a single video CLIP embedding. Video captions are generated by PLM trained with or without * human-refined data (see §2.3). Frame captions are generated by the Llama 3.2 vision model. Each component helps on different metrics, overall culminating in a huge boost to both image and video zero-shot performance. + +Our analysis reveals that the most critical components are the video metadata and PLM's video caption; however, all components are necessary to achieve peak performance in our video data engine. + +In Fig. 6, we investigate the impact of scaling recaptioned video data on a later checkpoint of the same image-only model as in Fig. 2. Notably, scaling synthetic video data demonstrates consistent improvement in both image and video benchmarks. Full results of this scaling experiment can be found in the Appendix 19. + +In the top row, scaling synthetic video data consistently improves performance on image benchmarks, with monotonic improvements of $+1.1\%$ in ObjectNet and $+1.6\%$ in ImageNet Adversarial. ImageNet val and ImageNet v2 have smaller gains, with accuracy increases of $0.3\%$ to $0.5\%$ , plateauing at $\sim 7\mathrm{M}$ samples. We also observe a significant boost to zero-shot retrieval (here, COCO [76]) of $+3.8\%$ to $+4.1\%$ top-1 recall. + +The video tasks listed in the bottom row demonstrate a consistent story. We observe a significant jump in performance between none and 3M videos across all video classification tasks, indicating that there is a domain gap for image-only models that hinders their ability to perform well on video out of the box. Further scaling synthetic video data leads to substantial performance gains in both video classification and retrieval. 
Video classification accuracy improves consistently by $+5.6\%$ to $+11.7\%$ without plateauing, while video retrieval shows significant improvements of $+7.7$ to $+15.3$ top-1 recall. + +![](images/d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg) +Figure 6 Video Data Scaling. Finetuning on videos recaptioned by the PE video data engine from 0M (baseline image-only model) to 17M samples consistently improves both image and video performance, both classification and retrieval. + +![](images/9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg) +Category: Food Preparation +Figure 7 PE Video Dataset Example. A sample from PVD, our released video-text dataset. Initial captions are generated by our video captioning model and then refined by human annotators. Annotators are instructed to add details and remove model hallucination. In this example, the model hallucination "a spoon" is removed; and more details such as "glass bowl" and the action "scraping" are added. See Appendix Fig. 18 for more. + +Model caption: "The video shows a person making a cake. The person is seen mixing ingredients in a bowl, pouring the mixture into a pan, and adding chocolate chips. The video also shows the person using a whisk and a spatula to mix the ingredients and pour the mixture into the pan. The person is wearing a blue shirt and is seen holding a glass bowl and a spoon. The background of the video is a white marble countertop, and there is a striped towel on the counter." + +Human refinement: "The video shows a person making a chocolate cake. The person mixes ingredients in a glass bowl and pours the mixture into a baking pan. Then, the person adds chocolate chips on top. The video also shows the person mixing the ingredients with an electric whisk and scraping the mixture into the pan with a spatula. The person is wearing a blue shirt and is seen holding a glass bowl. 
The background of the video is a white marble countertop, and there is a striped towel on the counter." 

These experiments highlight the quality of our video data engine and its ability to significantly improve encoder performance, even with only a relatively modest 17M videos compared to the billions of images seen during pretraining. Our video data engine is a vital component in building a strong, unified image-video encoder. 

# 2.3 PE Video Dataset (PVD) 

For the benefit of the community, we release a new video dataset: PE Video Dataset (PVD).2 PVD comprises 1M high-quality and diverse videos with accompanying tags and descriptions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes. 

We additionally select 120K of these videos with the highest degree of motion to annotate with detailed captions by generating synthetic captions using our video captioner (§2.2) and employing 200 annotators to verify and refine them. We ask the human annotators to improve the synthetic captions by removing any hallucinations, correcting words that describe the video inaccurately, eliminating repetitive or redundant words to make the caption more concise, and adding any missing actions being performed in the video. 

We release two versions of annotations for the 120K PVD subset: (1) Human verified captions: extended summaries with an average length of 57.1 words that provide a high-level description of each video. These captions are suitable for CLIP-style training. (2) Long automated captions: detailed and fine-grained descriptions with an average length of 111.7 words that capture spatial and temporal events. These captions are ideal for fine-grained video understanding. 
Videos998,862
Human Captions118,862
Total Duration4625 hrs
Duration (s)16.7±9.8
Human Caption Length57.1±25.4
Model Caption Length111.7±43.2
+ +Table 3 PVD Statistics. + +In Fig. 7, we visualize a video example together with their model and human captions from PE Video Dataset (See Fig. 18 for more). The dataset statistics are summarized in Tab. 3. Finally, We use $105\mathrm{K}$ of these refined samples to improve the data engine ( $\S 2.2$ phase 2) and $15\mathrm{K}$ as a high-quality video retrieval benchmark. + +PVD Benchmark. We use 15K of the human-refined video-caption pairs as a held-out test set, which we introduce as a new video retrieval benchmark, PVD Benchmark, to evaluate finegrained video-caption alignment. We follow the format of MSR-VTT [153] to construct the benchmark. We select videos from 10 different categories, including hand actions, object interactions, food preparation, work activities, outdoor scenes, animals, water scenes, object handling, close-up shots, and nature scenes, with an overall average caption length of 51.7 words (see Appendix A.2.3 for statistics). We use PVD Benchmark to evaluate SigLIP [160], SigLIP2 [138], InternVL [19], and PE models, and the results can be found in Tab. 7. + +# 2.4 A Unified Encoder for Image and Video + +Using a robust, scalable image pretraining recipe and video-pretraining data recaptioned by the proposed video data engine, in this section we present $\mathsf{PE}_{\mathrm{core}}$ , a unified image-and-video encoder. + +Model Architecture. To capitalize on the promising scaling behavior observed in §2.1, we scale the largest $\mathrm{PE}_{\mathrm{core}}$ model to 2B parameters3 (G scale). Tab. 4 shows the detailed model configuration of the vision and text transformers and the dimension of the output clip embedding space. + +
ScaleTowerParamsWidthDepthMLPHeadsCLIP Dim
BVision0.09B768123072121024
Text0.31B102424409616
LVision0.32B1024244096161024
Text0.31B102424409616
GVision1.88B1536508960161280
Text0.47B128024512020
+ +Table 4 PE Model Configurations. + +Smaller Model Distillation. To maximize the performance of + +smaller models (B and L scales in Tab. 4), we employ a distillation finetuning approach [49] using $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ as the teacher. This process involves a short finetuning schedule where both the student and teacher models encode image and text inputs separately to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence, distilling multimodal relational knowledge from the teacher into the student. + +Notably, we find that using a smaller softmax temperature for the teacher's distributions, specifically $0.5 \times$ the temperature used for the student's distribution, significantly enhances the effectiveness of knowledge distillation. By leveraging the strong embeddings provided by $\mathrm{PE}_{\mathrm{core}} \mathrm{G}$ , our short distillation finetuning schedule significantly boosts the performance of both B and L scale models of $\mathrm{PE}_{\mathrm{core}}$ (see Appendix C.3). + +Model Training. The training process of $\mathrm{PE}_{\mathrm{core}}$ involves three stages: + +1. Image pretraining. We scale up image pretraining to 5.4B publicly available image alt-text pairs curated with MetaCLIP [152] and a total of 86B samples seen to ensure convergence (58B for B and L). We use a global batch size of 131K, with progressive resolution from 98 to up to 448 depending on the model. +2. Image and video finetuning. Following the initial pretraining, we subsequently finetune the model at max resolution with a short schedule for 50M samples on the image pretraining data (as cooldown) followed by 22M samples on the recaptioned videos with a smaller learning rate and batch size. The video captions are produced using the proposed video data engine (§2.2). 
For each video clip, we uniformly sample 8 frames, encode them, take their average to produce a single video embedding, and align them with the corresponding video captions using the same contrastive objective in image training. +3. Smaller model distillation. We distill the 2B model (G scale) into smaller contrastive pretrained models at B and L scales under their final resolutions, using a short schedule that covers approximately 4B samples seen ( $\sim 8\%$ of the pretraining schedule) with a lower learning rate and no weight decay. + +The detailed training configuration and setups are listed in Appendix B.1.1. + +# 2.5 Core Results + +Zero-Shot Image Results. In Tab. 5, we present $\mathrm{PE}_{\mathrm{core}}$ 's performance on zero-shot image benchmarks for classification and retrieval vs. the strongest existing models, including SigLIP2 [138] and proprietary models using JFT-3B [29], which is likely tuned for ImageNet. $\mathrm{PE}_{\mathrm{core}}$ outperforms all other contrastive models across the board on all zero-shot tasks, including the highly competitive average of zero-shot ImageNet robustness metrics [4, 26, 46, 47, 112, 143]. This marks a significant achievement, as we are the first to accomplish this in over 3 years without access to Google's internal JFT-3B [29] or WebLI [17] datasets. And at the same time, $\mathrm{PE}_{\mathrm{core}}$ also exceeds the existing state-of-the-art on image-text retrieval and significantly improves on fine-grained classification—the first to simultaneously hold state-of-the-art on all common zero-shot categories. + +By harnessing the power of our video data engine, training with a relatively small dataset of 22M videos and their corresponding synthetic captions leads to substantial gains in image benchmarks, with average general image classification improving by $+0.6\%$ with emphasis on more difficult benchmarks (notably $+1.2\%$ + +
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Fine-Grained Classification
Avg Class.ImageNet w1 [26]ImageNet i2 [112]ObjectNet IV Classes [4]ImageNet adversarial [47]ImageNet Adversarial [48]ImageNet Renditions [46]ImageNet Sketch [143]Avg Fine.Food 107 [9]Flowers Oxford [97]Pets Oxford [100]Cars Stanford [59]Aircrafts FGC [88]Countries 2/1 [133]Scenes SUN397 [150]Satellite RESISC [20]Avg Retrieval1Zero-Shot Retrieval MS-COCO t+to ing [76]
Proprietary0.24B2246.6B84.385.786.380.682.385.695.776.1-95.191.297.9--------------------------------------------------0.24B2246.6B84.385.786.380.695.776.1-95.191.297.9-----------------------------------
BASIC [102]1.0B5764.8B85.786.380.695.776.1-95.191.297.9----------------------------72.651.266.380.492.585.786.380.695.776.1-------------------------------------------------MS-COCO t+to ing [76]MS-COCO img→to ing [76]MS-COCO img→to ing [75]
CoCa [158]1.0B5764.8B85.786.380.695.776.1-95.191.297.9---------------------72.651.266.380.492.585.786.380.695.776.1---0.24B2246.6B85.786.380.695.776.1-------------------------------------
LiT-22B [24]
+ +ObjectNet, $+1.4\%$ ImageNet Adversarial) and fine-grained classification by $+1.0\%$ on average. Furthermore, due to the high level of detail and alignment of our synthetic captions, zero-shot retrieval is significantly boosted by $+3.6\%$ on average. These results emphasize that training with well-aligned video text data does not just improve video performance—it creates a strictly better model for both videos and images. + +Zero-Shot Video Results. We assess the performance of $\mathrm{PE}_{\mathrm{core}}$ on zero-shot video benchmarks by employing the same model as a frame-based video encoder, utilizing 8 uniformly sampled frames, as described in §2.2. + +We present the corresponding video results in Tab. 6. Our base image encoder already outperforms all other image-only encoders on both zero-shot classification and retrieval, including SigLIP2-g-opt. With video finetuning, $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ significantly outperforms even native video models that use full temporal attention on video classification, and nearly matches the + +Table 5 Zero-Shot Image Results. Image zero-shot performance of $\mathrm{PE}_{\mathrm{core}}$ compared to the state-of-the-art for both proprietary and open models. $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ is the first vision encoder to outperform the best models trained on the proprietary JFT-3B [29] and WebLI [17] on general classification. Moreover at all model sizes, $\mathrm{PE}_{\mathrm{core}}$ obtains state-of-the-art results across general classification, retrieval, and finegrained classification. $\dagger$ Re-evaluated: DFN by [130]; SigLIP and SigLIP2 by us with the same benchmark settings if not reported in [138] (see Appendix B.1.2). + +
ModelEncoder ParamsResolution #FramesVideo DataZero-Shot ClassificationZero-Shot Retrieval
Avg Class.Kinetics 409 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 57 [62]Avg RetrievalMSR-VTT 304 [76]MSR-VTT 304 [76]MSVD 304 [76]MSVD 304 [76]MSVD 304 [76]ActivityNet 304 [76]ActivityNet 304 [76]
B Scale
CLIP [106]0.1B2248n/a54.358.455.146.168.943.229.230.424.240.557.29.113.2
CLIP4CLIP [84]0.1B22412n/a-------32.0-38.5---
SigLIP2-B/16†[138]0.1B2248n/a57.358.755.048.482.042.339.938.530.149.067.228.625.8
PEcoreB0.1B224822M63.965.665.155.884.648.249.947.647.350.476.739.038.4
L Scale
UMT-L [67]0.3B224825M------47.140.737.149.074.541.939.4
SigLIP2-L/16†[138]0.3B3848n/a64.165.362.556.886.749.344.741.531.453.774.235.931.5
PEcoreL0.3B336822M71.473.472.765.387.158.554.850.350.157.282.446.442.1
Unbounded Scale
InternVL [19]5.5B2248n/a-69.168.960.6---44.740.2----
InternVideo2 [146]1.0B2248102M70.773.172.864.988.853.959.951.950.958.183.360.454.8
VideoPrism-g* [164]1.1B28816619M-76.4-----39.771.0--52.750.3
SigLIP2-g-opt†[138]1.1B3848n/a68.269.867.061.890.751.846.643.134.255.874.638.333.4
PEcoreG (image only)1.9B4488n/a70.973.172.264.389.555.547.644.335.254.373.941.436.3
PEcoreG1.9B448822M74.876.976.169.190.761.158.751.249.959.785.454.751.2
+ +Table 6 Zero-Shot Video Results. Video performance of $\mathrm{PE}_{\mathrm{core}}$ compared to recent video and image encoders. $\mathrm{PE}_{\mathrm{core}}$ obtains state-of-the-art in video classification and comparable performance on retrieval benchmarks while using only 22M videos. $^*$ Proprietary models. ${}^{+}\mathrm{SigLIP2}$ are evaluated by us with the same zero-shot prompts frame embedding averaging strategy (as in [19, 84, 106]). See Appendix B.1.2. + +state-of-the-art on video retrieval using a simple frame-level encoder. This result underscores the importance of our video data engine, resulting in $+3.9\%$ on average zero-shot video classification, and a massive $+11.1\%$ on retrieval. Moreover, $\mathrm{PE}_{\mathrm{core}}$ does this with much less video data compared to other video-based approaches like InternVideo2 [146] and VideoPrism [164], highlighting the benefits of a joint image-video encoder. + +
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Retrieval
ObjectNet [4]ObjectNet [4]Inaturalist 2017 [140]Dollar St 58 [39, 113]TextCaps img→cat [122]TextCaps Flip img→cat [122]PVD Bench img→vidPVD Bench vid→cat
SigLIP2-B/16 [138]0.1B22410B73.659.116.955.972.069.853.960.1
PEcore B0.1B2245.4B71.958.325.952.172.371.959.861.1
SigLIP2-L/16 [138]0.3B38410B84.473.226.757.678.076.261.967.1
PEcore L0.3B3365.4B84.774.335.359.678.578.364.765.2
InternVL-C [19]5.5B2245B80.667.219.458.272.367.863.465.1
SigLIP2-g-opt [138]1.1B38410B88.078.131.559.378.876.962.567.1
PEcore G1.9B4485.4B88.279.041.162.378.878.777.076.6
+ +Table 7 Additional Zero-Shot Results. We present several additional zero-shot benchmarks from existing datasets and our own PVD (§2.3) to address evaluation gaps left by standard benchmarks. + +
ModelEncoder ParamsResolutionDataEncoder Probing
ImageNet [26]ImageNet [26]ImageNet [26] Attention
DINOv2-g [98]1.1B224145M83.586.5\( 87.2^{\dagger} \)
RADIOv2.5-g [45]1.1B518-85.3--
AIMv2 3B [37]2.7B4487.2B--89.5
InternVL-C [19]5.5B2245B-88.2-
EVA 18B [130]17.5B2242B-88.9-
\( PE_{core}G \)1.9B4485.4B86.889.589.8
+ +Table 8 Encoder Probing Results. We evaluate $\mathrm{PE}_{\mathrm{core}}$ G's frozen features using the typical probing methods to compare to models without zero-shot support. from [37]. + +Additional Zero-Shot Benchmarks. We further evaluate $\mathrm{PE}_{\mathrm{core}}$ on an additional set of zero-shot classification and retrieval benchmarks we construct in Tab. 7 to address key gaps in common benchmarks. For comparison, we also evaluate SigLIP2 [138] and InternVL-C [19] on these benchmarks. + +First, we note that the version of ObjectNet [4] that is standard to benchmark robustness (e.g., in Tab. 5) is not the full set. ObjectNet consists of 313 classes of objects in challenging and uncommon orientations, locations, and viewpoints. However, the standard version used for benchmarking is a 113 class subset of classes that overlap with ImageNet-1k [26]. Naturally, benchmarking in this way rewards performing well on ImageNet classes over generality. To remove this bias, we construct the full ObjectNet set with all classes and compare to the reduced ObjectNet set in Tab. 7. Surprisingly, we find that while $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ performs $+7.6\%$ over InternVL-C and only $+0.2\%$ over SigLIP2-g-opt on the reduced ObjectNet set, it performs $+11.8\%$ over InternVL-C and $+0.9\%$ over SigLIP2-g-opt on the full set of classes, highlighting PE's generality. + +Next, we include iNaturalist [140] as a zero-shot benchmark because of its level of specificity with 2,101 fine-grained long-tail classes. $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ outperforms the next best SigLIP2-g-opt model by $+9.6\%$ , emphasizing PE's long tail knowledge. We then evaluate PE's cultural diversity on Dollar Street $[113]^4$ , which consists of images of under-represented populations. Here too we find $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ to outperform existing methods, with $+3.0\%$ over SigLIP2-g-opt. 
Further, we test OCR performance by setting up TextCaps [122] as a retrieval dataset. Notably, $\mathrm{PE}_{\mathrm{core}}$ performs on par or better than SigLIP, which is known for good OCR performance. This is potentially surprising, as the horizontal flip augmentation we used during robust pretraining (§2.1) is typically thought to hurt OCR performance. However, instead it seems to have given $\mathrm{PE}_{\mathrm{core}}$ the ability to read backwards: we test the same TextCaps retrieval but with all images horizontally flipped. Other models suffer from this, but $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ 's performance only drops by $0.1\%$ . Finally, we evaluate $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ on the PVD benchmark (§2.3), a challenging video retrieval task on 15K diverse and human-refined videos. Here, $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ significantly outperforms InternVL [19] by $+13.6\%$ on text $\rightarrow$ video and $+9.5\%$ to SigLIP2 [138] on video $\rightarrow$ text. 

Frozen Encoder Probing Results. To compare against models that are not capable of zero-shot classification, we additionally evaluate $\mathrm{PE}_{\mathrm{core}}$ using k nearest neighbors (following [98]), linear probing (following [19]), and attention probing (following [37]) on top of the ImageNet-1k [26] train set. We present these results in Tab. 8 and compare to other encoders using their reported numbers. In every case, $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ outperforms all existing open encoders, including those with significantly more parameters. 

Summary. $\mathrm{PE}_{\mathrm{core}}$ , a unified image-video encoder, achieves state-of-the-art performance across zero-shot classification and retrieval on both images and videos on a wide variety of benchmarks. 
This synergy is made possible by our robust image pretraining recipe (§2.1) and powerful video data engine (§2.2), which together enable the model to effectively leverage the strengths of both image and video data at scale. 

# 3 General Features in a Contrastive Disguise 

$\mathrm{PE}_{\mathrm{core}}$ puts up strong results on the tasks contrastive encoders are known for, like zero-shot classification and retrieval. But while those tasks are useful, they are only a small part of the vision ecosystem. What really matters is whether or not the features learned with our pretraining recipe are useful to downstream tasks. 

Today's common wisdom in the vision community cites that different pretraining methods result in features useful for different tasks: e.g., contrastive for classification, captioning for language modeling, and self-supervised learning for spatial tasks. To see how $\mathrm{PE}_{\mathrm{core}}$ stacks up against models with different pretraining techniques, we compare its frozen features to the state-of-the-art large-scale models for captioning (AIMv2-3B [37]) and self-supervised learning (DINOv2-g [98]) on a variety of downstream tasks. 

Layerwise Feature Analysis. We summarize the results of our frozen feature analysis in Fig. 8 for several downstream benchmarks in 3 categories: classification, language modeling, and spatial tasks. For classification, we probe each model using a randomly initialized cross attention transformer block. For language alignment, we use the Perception Language Model (PLM) [21] frozen encoder evaluation setup, learning a projector and finetuning a decoder-only LLM (see §4), and for spatial tasks we train with several different decoders (ViTDet [72] Mask-RCNN [43] with Absolute Win [7] for detection, DPT [109] for depth, and zero-shot feature correspondence for tracking [52]). For each experiment, we sweep over the layers of the model as the optimal features are not necessarily the last [18]. 
In each case, we use an equivalent image size (window size for detection) of $32 \times 32$ tokens. In each plot, we normalize performance by the maximum and minimum performance across models on that task. + +An Alignment Problem. This analysis reveals several insights. First, as expected, AIMv2 performs well at classification and the best at visual Q&A language tasks. Similarly, DINOv2 performs the well on spatial tasks like detection, depth, and even performs the best at grounding through an LLM. Then as already established by other works: DINOv2 lacks performance on OCR tasks [134]. This is no secret, but what is interesting is that its performance peaks in the middle of the network and then drops significantly by the end. And so does the performance of other models + +![](images/f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg) +Figure 8 Layer Analysis. Evaluating intermediate layers as frozen features across tasks for different pretraining methods: captioning (AIMv2-3B [37], left), spatially self-supervised (DINOv2-g [98], middle), and our contrastive recipe $\mathrm{(PE_{core}G}$ , right). Vertical lines denote the best layer and horizontal lines the best performance across models. As expected, AIMv2 performs well on language but not spatial, and DINOv2 performs well on spatial but not language. But surprisingly, intermediate layers of $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ perform well on both language modeling and spatial tasks. + +for other downstream tasks (AIMv2: tracking, grounding, detection; DINOv2: VQ&A, grounding). + +$\mathrm{PE}_{\mathrm{core}}$ exhibits similar behavior, but with unexpected results. Unlike the others, in earlier layers of the network $\mathrm{PE}_{\mathrm{core}}$ performs well on all tasks, often matching or exceeding the leading models. Remarkably, PE has intermediate layers that perform near to or on par with AIMv2 for language tasks and DINOv2 for spatial tasks, despite being trained with contrastive loss. 
Depth estimation is particularly noteworthy, as contrastive encoders are not typically considered state-of-the-art in that area. + +However, in almost all cases this strong performance diminishes rapidly towards the end of the network. In fact, the performance of $\mathrm{PE}_{\mathrm{core}}$ in the final layer is abysmal for certain tasks, such as LLM-based grounding (the reason for which will become apparent in §5). This behavior is less pronounced the closer the downstream task is to the pretraining method, suggesting an alignment problem. Specifically, a well-tuned large-scale contrastive model can learn general embeddings in the process of fitting its objective, but it fails to output them. Therefore, to reveal these embeddings, the model must be subsequently aligned to downstream tasks. + +Analysis. The finding that pure CLIP models possess features which match the performance of state-of-the-art pretraining methods in their specialized domains is new. In fact, recent work [31] has shown the opposite—that CLIP models fail to scale on downstream tasks. We next investigate how our approach yields these results. + +To start, we perform layerwise frozen feature analysis on COCO detection. $\mathrm{PE}_{\mathrm{core}}$ was particularly "peaky" on this task in Fig. 8, with its best layer on par with DINOv2, but last layer significantly worse. We already ablated each change we made from vanilla CLIP in Fig. 2 using a ViT-L/14 model. So to retrace our steps, we run frozen feature analysis on those checkpoints. For efficiency, we perform this experiment at a lower resolution and only sample even layers. In Fig. 9, we report COCO box mAP for the last and best layers for each cumulative ablation, along with the index of the best layer. Further, we plot the layerwise performance for each change in Fig. 10. 
+ +Surprisingly, the simple changes we made in §2.1 to construct our pretraining recipe overall improved the best layer's performance by + +almost $10\,mAP$ over vanilla CLIP! Some changes like high resolution (5) and RoPE (6) improving spatial features is to be expected, but unexpectedly data augmentation (8) and especially progressive resolution (2) help considerably. It is possible that contrastive pretraining is prone to overfit to the "global" nature of the task through "global tokens" [23]. However, as the model cannot maintain global tokens in the same place due to the resolution progressively changing, it is forced to be more robust. Also of note is that both progressive resolution (2) and attention pooling (7) move the argmax layer deeper into the network (rightmost column of Fig. 9). Attention pooling in particular alters the whole shape of the layerwise performance curve (Fig. 10), while the other changes typically only raise or lower it. + +![](images/fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg) +Figure 10 Layer Analysis corresponding to the results presented in Fig. 9. + +Potentially more interesting is what did not improve performance: specifically, increasing the batch size (3) and using LAMB with a high learning rate (4). Both of these changes explicitly help the model fit the CLIP loss better, which after a certain point may not improve the general features. Moreover, while the best layer overall improved significantly, the last layer performance stagnated after (2). This suggests that constructing the global CLIP token requires a substantial "decoder" (in this case, 6 layers for the final L/14 model). Although the features of this decoder are beneficial for some tasks (e.g., Visual Q&A as shown in Fig. 8), they are not general. Nevertheless, this does not prevent the model from learning general features; it merely limits their expression in the output. 
+ +![](images/bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg) +Figure 9 The Downstream Effects of Robust Pretraining. The ViT-L/14 checkpoints from Fig. 2 evaluated as frozen features on COCO [76] using Mask R-CNN [43]. We report the last layer performance, best layer performance, and the best layer's index. + +Scaling Behavior. Finding a simple, easily scalable vision pretraining method that produces generally useful features has been the white whale of the vision community for a while. Evidently, our robust recipe can enable contrastive pretraining to produce general features. So that begs the question, "does it scale?" + +We can answer this question in the same way: by performing frozen feature layer analysis of our S/14, B/14, and L/14 scaling ablation checkpoints from Fig. 3. We report the result of that analysis in Fig. 11. We also include our final $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ model using the same setup, but note this is an estimate as our ablation and final schedules are different. + +Immediately, we see a stark contrast between the scaling behavior of the vanilla CLIP recipe and ours. While the vanilla recipe quickly plateaus at L scale (300M), the best layer of our robust pretraining recipe demonstrates scaling to G scale (2B) and potentially beyond—despite being trained with a decidedly non-spatially aligned global contrastive loss. However, this is the best layer. The last layer performance still stagnates for both the vanilla recipe and ours. This may be why prior work [31] finds contrastive pretraining to not scale for downstream tasks—CLIP loss obfuscates its general features even with our recipe, placing them several layers deep. + +However, this is just for a single spatial task. To see whether the trend is consistent, we repeat this scaling analysis on a wide variety of downstream language modeling tasks using the same frozen evaluation setup as Fig. 8 and report the results in Fig. 12. 
Surprisingly, the simple change in pretraining recipe improves scaling for most language tasks as well—including output-side grounding (RefCOCO). Note that in this benchmarking setup, the LLM never sees videos during training so the Video Q&A per-layer results are noisy. Yet, the best layer trend is still the same. + +Clearly, contrastive pretraining with our + +![](images/680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg) +Object Detection + +![](images/1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg) +Figure 11 The Downstream Scalability of Robust Pretraining. Left: frozen feature layer analysis of the S/14, B/14, and L/14 models from Fig. 3 using the same setup as Fig. 9. Right: scaling behavior of the best layer for each model. Note: G is our final model and has a different schedule. + +![](images/f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg) + +![](images/f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg) +OCR Q&A + +![](images/1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg) +Visual Q&A + +![](images/97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg) + +![](images/8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg) +Captioning + +![](images/26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg) + +![](images/a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg) + +![](images/60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg) + +![](images/78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg) + +![](images/20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg) + +![](images/9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg) +Grounding + +![](images/ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg) + +![](images/dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg) + 
+![](images/c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg) +Video Q&A + +![](images/8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg) +Figure 12 Further Scalability Analysis. We repeat the analysis from Fig. 11 on a wide range of downstream tasks by adapting to a language model. Each category is an average of several downstream tasks (see §4). + +![](images/92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg) + +robust recipe produces strong general features that scale. However, these features are not going to be much use stuck in the middle of the network. To remedy this, in the remaining sections we will discuss methods for aligning these general features to the output of the network for both language modeling and spatial tasks. + +# 4 Perception Encoder: Language Alignment + +In §3 we have seen that $\mathrm{PE}_{\mathrm{core}}$ already possesses useful features for vision-language modeling. In this section, we lift these features through alignment tuning to construct a new encoder, $\mathrm{PE}_{\mathrm{lang}}$ , specialized for multimodal large language models (MLLMs). Our principle is to design not only the most performant, but also the most general vision encoder for use in MLLM development. To this end, we want a single language-aligned encoder that performs well across language models, across input resolutions, and for a wide variety of MLLM tasks. + +MLLM Evaluation Tasks. In this section, our main testbed is to adapt vision encoders to MLLMs and test on various MLLM tasks. 
We evaluate the downstream performance of each MLLM across five task categories: (1) OCR, Chart, Document Q&A on ChartQA [165], DocVQA [91], InfoVQA [92] and AI2D [57]; (2) Visual Q&A on TextVQA [125], OK-VQA [118], POPE [73], and VQAv2 [40]; (3) Captioning on Flicker [157], COCO [76], and No Cap [1]; (4) Video Understanding on VideoMME [38], STAR [148], TGIF-QA [53], EgoSchema [89], MVBenchmark [68], and PerceptionTest [105]; and finally (5) Grounding on RefCOCO [56]. + +# 4.1 Language Alignment Method + +We begin by searching for the optimal language alignment method. We design our alignment tuning based on the midtraining stage of Perception Language Model (PLM) [21], which is to adapt $\mathrm{PE}_{\mathrm{core}}$ to a pretrained decoder-only LLM (Llama 3 [82]) connected by a vision projector. We start with "warmup" training stage with autoregressive next-token prediction loss on 1M image-text samples from pretraining, where everything but the projector is frozen. Then, we proceed to finetune all parameters on 70M data samples [21] covering natural images, documents/charts/diagrams, and videos, using the same next-token prediction loss. After completing this language alignment, we extract the vision encoder from the model and refer to it as $\mathrm{PE}_{\mathrm{lang}}$ . + +To arrive at the optimal training configuration presented in PLM [21], we first conduct ablation studies using a 20M subset of the data. In Tab. 9, we ablate the LLM sizes, training parameters, vision projector types, output layers to project, and encoder regularization. We evaluate across OCR Q&A, Captioning, Visual Q&A, and Video Q&A and find the best configuration. + +LLM Setup. We explore different scales (1B or 3B parameters) and freezing weights of the LLM. We observe that going from 1B to 3B parameters increases average score by 1.6 points $(76.5\rightarrow 78.1)$ . Unfreezing the LLM boosts this number to 78.4. + +Vision Projector. 
Using a 2-layer MLP vision projector instead of a linear layer improves the average score from 77.2 to 78.1, while only adding few parameters (13.5M → 27M). + +PE Output Layer. As shown in §3, $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ has intermediate layers that perform significantly better than the last layer when used as features for certain tasks. However, it is not clear if that + +
LLM scaleLLM unfrozen Regularization?ProjectorLayerAvg.OCR Q&A Average of 4
Average of 3Captioning Average of 3
LLM Setup
1BMLP4776.560.7115.176.054.0
3BMLP4778.165.9115.776.654.1
3BMLP4778.465.8117.676.353.7
Vision Projector
3BLinear4777.264.5114.176.553.7
3BMLP4778.165.9115.776.654.1
PE Output Layer
3BMLP5075.956.6116.776.553.7
3BMLP4778.165.9115.776.654.1
3BMLP4176.965.5112.875.453.9
PE Regularization
3BMLP4779.969.0117.577.455.6
3BMLP4780.168.7118.377.056.3
+ +Table 9 Language Alignment. We find the best configuration to language align $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ using autoregressive language training. + +same behavior applies when finetuning. We test applying the projector to layers 41, 47, and 50 (the last layer), and find that layer 47 works best. Incidentally, this is also the optimal layer for frozen VQ&A in Fig. 8. + +PE Regularization. We apply LayerScale [135] and DropPath [50] to the vision encoder during the alignment, for stabilizing training. This improves the 78.1 average score to 79.9 (+1.8 points). Unfreezing the LLM boosts this number further to 80.1. We choose this configuration (last row) as our final alignment setup. + +To construct $\mathrm{PE}_{\mathrm{lang}}$ , we scale this recipe up the 70M samples mentioned above (more details in [21]). In summary, we use a pretrained Llama3.2 3B, unfrozen, with a 2-layer MLP as a vision projector on top of layer $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ layer 47 (with the last 3 discarded) and regularize the encoder with LayerScale and DropPath. Compared to the 20M sample ablation setting in Tab. 9, the final $\mathrm{PE}_{\mathrm{lang}}$ trained on 70M total samples gives another +2.1 points to 82.2 on the average across OCR Q&A, Captioning, Visual Q&A, and Video Q&A. + +Effects. The goal of alignment tuning is to lift the strong features found in intermediate layers of $\mathrm{PE}_{\mathrm{core}}$ described in §3 to the end of the network. To see if we actually accomplished that, we perform the same layerwise + +analysis as in Fig. 8 on our final $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ model and compare it to the original $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ checkpoint it was initialized from. We present the results of this analysis in Fig. 13, and immediately we see that language alignment was a success: across all categories, the performing layer for the aligned model was the last, no matter the performance of the original checkpoint. 
Notably, our $\mathrm{PE}_{\mathrm{lang}}$ training mix did not contain grounding data, which means that this significantly lifted grounding performance is entirely due to the strong intermediate grounding features in $\mathrm{PE}_{\mathrm{core}}$ now being aligned to the end of the network. Moreover, specific domains such as OCR Q&A that were represented in the training mix see a significant boost to performance compared to even the best layer of $\mathrm{PE}_{\mathrm{core}}$ , which was already strong. Thus, with an order of magnitude fewer samples compared to pretraining, we were able to language align $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ to create a single, strong encoder for all visual language modeling tasks. Following this success, we align $\mathrm{PE}_{\mathrm{core}}\mathrm{L}$ in a similar manner to construct $\mathrm{PE}_{\mathrm{lang}}\mathrm{L}$ (see [21]). + +![](images/c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg) + +![](images/14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg) + +![](images/daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg) +Figure 13 Language Alignment. We analyze how language alignment changes the internal features of PE. Similar to our $\mathrm{PE}_{\mathrm{core}}$ analysis in Fig. 12, we extract $\mathrm{PE}_{\mathrm{lang}}$ and adapt each layer to a new LLM. + +![](images/78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg) + +# 4.2 Comparisons with Existing Vision Encoders + +We compare $\mathrm{PE}_{\mathrm{core}}$ and $\mathrm{PE}_{\mathrm{lang}}$ with other vision encoders that are popular choices in MLLM literature: MetaCLIP [152], SigLIP2 [138], CLIP [106], AIMv2 [37], DINOv2 [98], and InternViT2.5 [18]. Overall, these encoders span several different pretraining losses (e.g., contrastive, captioning, self-supervised, and mixed supervision), encoder sizes (from 300M to 6B parameters), and resolutions (from 224 to 512). 
For all vision encoders, we find the best intermediate layers to train MLLM for fair comparison (more in Appendix B.2). + +MLLM Benchmarking Setup. We connect each vision encoder, including $\mathrm{PE}_{\mathrm{lang}}$ , to a language decoder with a fresh 2-layer MLP projector. Similar to the alignment stage, we first train only the projector on a subset of 1M image-text pairs from pretraining. Then, we train both the projector and LLM on 2.6M visual Q&A pairs, + +
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. Ground RefCOCOg+ [56]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGIF-QA Acc. [53]EgoSchema Acc. [89]MV-Bench Acc. [68]PerceptionTest Acc. [105]
ChartQA Acc. [165]DocVQA Acc. [91]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1444.947.933.028.770.268.447.662.586.976.5110.587.5130.0114.160.653.946.151.066.458.649.451.9
MetaCLIP-G [152]1.8B224/1444.847.633.127.970.668.848.263.586.576.9111.186.5132.1114.860.553.145.050.766.456.048.751.9
PElang G†1.7B*224/1453.761.347.132.274.171.855.165.386.879.8116.491.0136.9121.265.755.547.355.768.959.648.652.9
576 Tokens per Image
CLIP [106]0.3B336/1453.561.749.532.870.172.760.763.987.378.9113.392.0132.9115.065.054.246.352.168.657.448.552.3
AIMv2-L [37]0.3B336/1453.361.648.032.171.473.762.764.387.780.1115.290.9135.6119.263.352.544.350.967.554.444.953.2
AIMv2 L Dist. [37]0.3B336/1453.761.149.431.572.774.162.864.888.380.3117.894.7137.5121.262.653.844.352.465.057.450.053.6
SigLIP2-so [138]0.4B384/1658.969.058.335.273.176.869.867.288.781.6116.592.1137.7119.867.454.545.553.167.257.649.354.5
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1466.976.873.641.176.176.268.566.089.181.3119.796.1139.6123.468.958.148.758.970.561.852.755.9
1024 Tokens per Image
InternViT 2.5 L [18]0.3B448/1460.674.159.235.973.174.265.464.487.679.6112.388.4133.7114.966.950.645.244.862.754.246.050.5
SigLIP2-so [138]0.4B512/1663.372.169.339.072.777.974.866.089.081.8117.493.5138.3120.269.655.846.255.467.062.050.054.5
PEcore L0.3B448/1459.468.762.536.669.774.767.764.388.378.7112.789.6133.4114.959.750.941.751.261.652.647.450.6
PElang L0.3B448/1471.181.081.946.475.077.173.065.589.380.8117.394.3137.3120.170.556.547.057.268.059.852.354.7
DINOv2-g [98]1.1B448/1430.019.614.724.261.561.019.360.488.675.8109.486.5131.6110.164.949.539.752.160.146.847.450.8
AIMv2 3B [37]2.7B448/1448.940.553.933.967.273.064.164.085.278.9115.793.8135.2118.136.154.645.154.566.755.451.754.3
InternViT2.5-6B [18]5.5B448/1459.972.359.435.272.575.568.964.988.280.2115.092.2136.3116.368.049.644.547.062.645.848.948.5
PEcore G1.9B448/1460.869.965.436.771.173.365.960.788.478.0112.591.6133.6112.466.652.042.353.162.951.448.853.6
PElang G†1.7B*448/1472.480.584.448.376.478.175.265.490.181.8120.196.6140.0123.671.358.048.060.169.462.052.456.0
+ +Table 10 MLLM Results with Llama 3.18B. We compare various vision encoders at their native resolution using Llama 3.1-instruct 8B [82] as the language model. The table compares models of similar class in number of vision tokens and parameters. $\mathrm{PE}_{\mathrm{lang}}$ shows strong performance across all benchmarks, including against models $3\times$ its size. ${}^{*}\mathrm{PE}_{\mathrm{lang}}$ has 1.7B parameters since we discard the last 3 layers during language alignment. $\dagger$ Interpolated without extra training. + +image captions, and image grounding samples (see Appendix B.2 for details). We benchmark at the native resolution of each encoder (with higher resolution tiling results in Appendix C.4). Finally, we ablate over two language decoders, Llama 3.1 8B [82] and QwenLM 2.5 7B [155], to measure generalization across LLMs. + +Results. Tab. 10 shows benchmarks results for native resolution input across existing encoders, $\mathrm{PE}_{\mathrm{core}}$ and $\mathrm{PE}_{\mathrm{lang}}$ . Notably, AIMv2 [37], InternViT2.5 [18], SigLIP2 [138] and $\mathrm{PE}_{\mathrm{lang}}$ are trained jointly with a language decoder using next token prediction objective, and thus they perform better overall compared to the base contrastive and self-supervised models across all the metrics. However, $\mathrm{PE}_{\mathrm{lang}}$ uses a fraction of the training FLOPs for language alignment tuning, while significantly outperforming all vision encoders by large margin (an average of $+3.5$ points for G and $+2.0$ points for L). Similarly, when tiling with 4 tiles and 1 thumbnail (see Appendix Tab. 30), both $\mathrm{PE}_{\mathrm{lang}}\mathrm{L}$ and $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ outperform all existing vision encoders, including InternViT2.5 [18], which was specifically pretrained in a tiling setting and with grounding data. Appendix C.4, shows a breakdown of the RefCOCO results, as well as results for tiling with higher resolution. + +Transferability. 
As $\mathrm{PE}_{\mathrm{lang}}$ is aligned with Llama 3.2-instruct 3B, we conduct a separate set of experiments to check if our model performs well with a different base LLM. In Tab. 11 we repeat the native resolution comparison with QwenLM 2.5 7B [155]. Interestingly, $\mathrm{PE}_{\mathrm{lang}}$ not only outperforms all vision encoders in this setting, but it also outperforms InternViT2.5 [18], which is specifically aligned to QwenLM 2 [154] throughout midtraining. In fact, $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ with QwenLM even improves its performance with Llama in some cases like with OCR Q&A and video benchmarks, emphasizing the generality of our language alignment. + +
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. Ground RefCOCOg+ [56]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGIF-QA Acc. [53]EgoSchema Acc. [89]MV-Bench Acc. [68]PerceptionTest Acc. [105]
ChartQA Acc. [165]DocVQA Acc. [91]InfoVQA Acc. [92]AI2D Acc. [57]TextVQA Acc. [125]OK-VQA Acc. [118]POPE Acc. [73]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1660.572.059.136.774.366.269.065.489.281.1116.391.6137.3120.070.057.051.355.866.061.051.955.7
SigLIP2-g-opt [138]1.1B384/1660.871.060.436.775.276.870.365.689.581.8118.896.4139.0121.169.958.352.057.668.162.052.857.4
PElang G†1.7B*336/1466.877.572.441.176.476.067.965.489.181.5118.894.6139.5122.370.160.254.661.769.863.654.357.2
1024 Tokens per Image
InternViT2.5 [18]0.3B448/1460.375.461.136.268.474.265.663.787.879.5112.188.5133.5114.168.155.850.354.766.659.050.653.8
SigLIP2-so [138]0.4B512/1666.377.271.942.473.977.974.265.689.981.8117.193.0138.0120.370.555.950.357.367.262.650.347.4
PEcore L0.3B448/1463.573.967.440.572.275.769.264.089.480.2113.388.7135.2115.966.557.349.657.867.760.852.355.5
PElang L0.3B448/1470.280.680.746.073.576.872.864.189.481.0116.493.4137.6118.170.458.351.659.867.462.253.455.4
DINOv2 [98]1.1B448/1431.321.714.724.664.361.018.959.588.976.9110.187.3132.1110.869.354.346.956.563.456.849.752.2
AIMv2 3B [37]2.7B448/1466.076.770.541.475.277.974.266.289.481.9119.296.4139.2122.067.656.345.958.067.860.851.453.9
InternViT2.5 [18]5.5B448/1464.278.265.339.673.676.470.164.589.381.7117.695.9138.4118.672.856.150.359.167.356.651.152.2
PEcore G1.9B448/1464.875.968.841.672.975.267.962.489.780.7113.191.7135.2112.370.557.048.758.366.960.852.954.5
PElang G1.7B*448/1472.981.683.749.576.777.974.964.590.381.9118.994.6139.8122.372.160.454.162.568.366.654.256.8
+ +System-Level MLLM Comparison. In Tab. 12, we conduct a system-level comparison to the state-of-the-art open-access MLLMs: LLaVA-OneVision 7B [66], Gemma3 12B [132], Molmo-D 7B [25], Qwen2 VL 7B [144], InternVL 2.5 8B [18] and the very recent InternVL 3 8B [168]. Each baseline uses a contrastively pretrained ViT (SigLIP-so400M [160], CLIP-L [106], DFN-H [33], and InternViT 2.5 300M [18]). For our PLM-8B we use $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ as the vision encoder with 36 tiles for images and 32 frames for video and Llama 3.1-instruct 8B as the language decoder (more details in [21]). We show numbers from their respective works or evaluate them ourselves if they are not reported (except for Gemma and InternVL 3). PLM-8B outperforms all other models tested, emphasizing that $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ can be used to drive strong results across a wide range of tasks. + +Table 11 MLLM Results with QwenLM 2.5 7B. Same setting as Tab. 10, but with QwenLM2.5 7B [155] as the language model. Although $\mathrm{PE}_{\mathrm{lang}}$ is aligned to Llama3.2 3B, the language alignment transfers well to a different language model. + +
ModelEncoderOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QAChartQA Acc. [165]DocVQA Acc. (test) [91]InfoVQA Acc. (test) [92]AI2D Acc. [57]Avg. VQATextVQA Acc. [125]OK-VQA Acc. [118]POPE Acc. [73]VQAv2 Acc. (val) [40]Avg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGIF-QA Acc. [53]EgoSchema Acc. (test) [89]MVBench Acc. [68]PerceptionTest Acc. (test) [105]
LLaVA-OV 7B [66]SigLIP-so400M81.480.086.768.890.179.977.369.689.283.579.555.770.7112.163.857.766.077.265.257.158.1
Gemma3 12B [132]SigLIP-so400M-75.787.164.9--67.7--71.6----------54.9
Qwen2 VL 7B [144]DFN-H86.683.694.576.591.780.983.667.988.383.893.779.9102.598.767.762.967.381.865.461.666.9
InternVL 2.5 8B [18]InternViT 2.5-300M87.084.693.077.692.879.979.369.290.680.6113.096.5125.8116.772.960.677.691.366.272.668.9
InternVL 3 8B [168]InternViT 2.5-300M87.286.692.776.892.6-80.2-91.1------66.3---75.4-
PLM-8BPElangG88.485.594.680.992.782.986.569.689.985.6127.4105.6146.7129.977.958.384.995.568.877.182.7
+ +Table 12 MLLM System-Level Comparison. We show a system-level comparison between PLM-8B based on $\mathrm{PE}_{\mathrm{lang}}\mathrm{G}$ and popular open-access models of similar LLM scale using existing encoders. We report test set results where specified. + +# 5 Perception Encoder: Spatial Alignment + +While language alignment with a pretrained LLM decoder is well-established, the best way to spatially align a model is not obvious. As shown in §3, $\mathrm{PE}_{\mathrm{core}}$ already has features that perform well for spatial tasks. However, the layer that performs the best for higher level spatial tasks like detection or depth estimation (layer $\sim 40$ ) is vastly different than the layer that performs the best for a pure spatial task like tracking (layer $\sim 30$ ). While we were able to ignore this disparity during language alignment by aligning to an LLM decoder that could do all tasks, classical spatial tasks have decoders that come in all shapes and sizes. It would be impractical to simply align the model using all downstream decoders mirroring language alignment. Thus, we must first answer the question, what is happening in the features at those layers to make them useful for spatial tasks? + +# 5.1 Core Feature Analysis + +We begin by analyzing the spatial properties of the features for $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ in the range of layers where it performed optimally for zero-shot tracking in §3. In Fig. 14, we plot (1) the pairwise feature cosine similarity between the pink token and all others, (2) the head average attention map for that token, and (3) the full attention matrix $(HW\times HW)$ . + +An 18 Layer Decoder. Remarkably, the cause for the tracking performance peak at layer 32 is abundantly clear from observing + +![](images/a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg) +Figure 14 $\mathsf{PE}_{\mathrm{core}}\mathsf{G}$ Feature Analysis. 
To understand the dichotomy between optimal $\mathsf{PE}_{\mathrm{core}}$ features for spatial tasks observed in Fig. 8, we analyze the spatial properties of the features between layers 30 and 34. + +the visualizations. Up until layer 32, the attention maps remain local. However, that changes abruptly at layer 33, at which point several tokens in the background of the image become "global" tokens. As shown by the vertical lines in the full attention matrix, starting from layer 33 every token attends to them. Thus, every layer 33 and up become part of a decoder for global information. + +This is not a new phenomenon. Recent work [23] shows this happening in all modern vision transformers above L scale. But notably these "global tokens" are not necessarily harmful. Given the optimal layer for most tasks in Fig. 8 lies within the global token region, the information they aggregate is useful downstream. However, tracking in §3 is zero-shot and relies purely on spatial correspondences, meaning it cannot make use of the global tokens. This explains why tracking peaks right before their introduction, while tasks that rely on semantic understanding or have larger decoders that can benefit from them do well with the later layers. + +# 5.2 Spatial Alignment Method + +Given the analysis in §5.1, we have two objectives in creating a spatial alignment method: (1) we must preserve the optimal semantic information of the model (including the global tokens) that peaks around layer 40, and (2) we must do so while emphasizing local alignment in service of spatial tasks with shallow decoders. The first can be easily achieved by aligning with the model's own features (e.g., with MaskFeat [147]), but the second is more challenging. To accomplish this, we employ the Segment Anything Model (SAM) 2.1 [111] in a novel way to enforce spatial correspondence information in PE. + +Retaining Semantics. 
To retain the strong semantic features from $\mathrm{PE}_{\mathrm{core}}$ , we finetune the model with itself as a teacher. Specifically, we train the model to minimize the cosine similarity between its last layer and the frozen layer 41 features of its initialization (a layer around the peak for many tasks in Fig. 8). On its own this would be a tautology, so we apply heavy regularization to the student: DropPath [50] and LayerScale [135] similar to language alignment, as well as performing MaskFeat [147] with $75\%$ masking. We keep the teacher + +fixed in contrast to other state-of-the-art spatial models, which all employ an EMA teacher [98, 138]. This could potentially help, but we opt for simplicity. + +Encouraging Locality. While we could "retain" locality by self-distilling from layer 32 features, that may be less effective as we are already distilling another layer of the model. Instead, we turn to a model that is explicitly tuned for locality: SAM [58, 111]. Notably, several works [110, 116, 119] have shown SAM to not be an effective teacher when distilling from multiple sources (though recently [45] has shown it can help with some tricks). However, upon observation of the raw features of SAM 2.1-L (Fig. 15), the main problem may be the same one we are currently trying to solve: SAM has global tokens as well! In this case, + +![](images/d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg) +Figure 15 SAM 2.1 Feature Similarity. The cosine similarity between the pink marked token and all others for SAM 2.1-L [111] features vs. our proposed mask logit features. + +they appear as dark spots in a grid-like arrangement across all examples in Fig. 15 raw features. + +Using the features of a model that itself has global tokens to mitigate the effect of global tokens is dubious at best. But, we don't have to use SAM's features to learn locality. At its core, SAM is a model that transforms points into spatially contiguous masks of select object. 
If what we want is smooth, locally consistent features, we can use the mask predictions themselves. Specifically, we query SAM 2.1-L with 1024 points arranged in a $32 \times 32$ grid. For each point, SAM returns an $H \times W$ mask logit the size of the image, which it would normally threshold and NMS. However, we instead concatenate those logits into an $H \times W \times 1024$ tensor and use that as the feature map for alignment. This explicitly produces locally well-aligned features compared to the underlying feature space and has no spatial artifacts caused by global tokens, as shown in Fig. 15.

Then, to align, we distill the spatial correspondences between tokens by computing their pairwise cosine similarity for both the student and the teacher (creating an $HW \times HW$ matrix for each) and aligning them with MSE loss. Unlike SAM's underlying feature space (which [45] shows may be brittle to interpolation), the mask logit features are robust to interpolation, so we simply interpolate them down and train at the $\mathrm{PE}_{\mathrm{core}}$ model's original 448px resolution. Finally, as for self-distillation, we add the same masking and regularization. For both teachers, we apply loss to all tokens and add no extra parameters other than LayerScale.

Effects. Again, the goal of alignment is to lift the strong features already learned by the core model as shown in §3. Thus, like we did for language alignment in §4.1, we perform layerwise frozen feature analysis on spatial tasks in Fig. 16. This time, we evaluate the original $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ checkpoint as well as $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ aligned to its own layer 41, to SAM 2.1 mask logits, and finally both. We denote aligning to both as $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ . 
+ +Aligning purely based on the original model's layer 41 features performs well on detection, depth, and semantic segmentation, but falls short for zero-shot tracking, where precise locality is necessary to define boundaries between objects. In contrast, aligning to SAM 2.1 mask logits lowers last layer performance on every task except for tracking, where it significantly improves performance. Understandably, this is because the mask logits have little semantics (see Fig. 17). Thus, the optimal approach is to combine both teachers. As a result, $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ not only lifts the features for all tasks to the end of the network, but it also improves over self-alignment alone. Notably, $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ s tracking performance is lower than + +the SAM-aligned model, but it is still ahead of other methods while being a generally good model, see §5.3. + +![](images/b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg) + +![](images/3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg) + +![](images/60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg) +Figure 16 Spatial Alignment. We analyze how our two spatial alignment methods individually change the internal features of $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ . Then we combine both alignment methods to create $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ (see Appendix B.3.1). + +![](images/1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg) + +Last Layer Feature Visualization. In Fig. 17, we visualize the last layer features for the $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ and the 3 aligned models, with similar colors denoting similar features. In the first column, we see why the last layer performance of $\mathrm{PE}_{\mathrm{core}}$ is so poor: while the last layer features contain information about the salient objects, they seem to have lost spatial coherence. 
Aligning to the model's own layer 41 features fixes this, but its spatial quality is lacking. In contrast, the model aligned to SAM 2.1 mask logits has locally clear features, but without semantics (similar objects have dissimilar features, see row 1 cats and row 2 cows). $\mathrm{PE}_{\mathrm{spatial}}$ , using both teachers at once, retains the semantics of $\mathrm{PE}_{\mathrm{core}}$ while producing high-quality spatial features.

![](images/e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg)
Figure 17 Last Layer Visualization for the models in Fig. 16 using 3 dimensional PCA to map features to LCh color space (see Appendix B.3.2). More examples in Appendix C.5.

EncoderParamsResolutionTrackingSegmentationDepth
DAVIS (↑) [104]ADE20k (↑) [167]NYU (↓) [123]
BestLastIdxBestLastIdxBestLastIdx
OAI CLIP-L [106]0.3B224/1439.437.117/2439.438.319/24.366.39719/24
AIMv2-3B [37]2.7B448/1454.729.313/2441.631.920/24.311.32616/24
SigLIP-so [160]0.4B384/1448.736.316/2740.138.322/27.339.36921/27
SigLIP2-so [138]0.4B512/1651.445.315/2744.042.924/27.306.32925/27
SigLIP2-g-opt [138]1.1B384/1643.538.832/4042.141.334/40.302.32434/40
DINOv2-L [98]0.3B448/1458.758.223/2447.347.324/24.297.30823/24
DINOv2-g [98]1.1B448/1458.558.540/4048.748.437/40.279.29027/40
PEcoreG1.9B448/1456.842.832/5041.538.644/50.249.30939/50
PEspatialG1.9B448/1461.561.550/5049.348.949/50.262.27546/50
+ +Table 13 Frozen Feature Dense Prediction including zero-shot tracking, semantic segmentation and depth estimation. We report best and last layer performance, along with which layer was best for each model. See Appendix B.3.3 for experimental settings. + +
EncoderParamsPretrain ResolutionLVIS [41]COCO [76]
APboxAPmaskAPboxAPmask
OAI CLIP-L [106]0.3B224/1445.041.954.047.5
MetaCLIP-G [152]1.8B224/1445.141.953.246.7
SigLIP-so [160]0.4B224/1445.041.954.447.6
MAE-L [44]0.3B224/1446.143.955.649.3
EVA02-L [35]0.3B224/1449.345.254.948.2
SigLIP2-so [138]0.4B512/1649.345.656.049.4
SigLIP2-g-opt [138]1.1B384/1652.948.557.150.2
DINOv2-L [98]0.3B518/1446.743.555.749.0
DINOv2-g [98]1.1B518/1451.547.357.250.0
PEcoreG1.9B448/1451.947.957.049.8
PEspatialG1.9B448/1454.249.357.850.3
# 5.3 Comparisons with Existing Vision Encoders

Frozen Feature Dense Prediction. In Tab. 13, we compare different vision encoders' frozen features on three dense prediction tasks: DAVIS tracking [104] (J&F) following the training-free setting from [52, 107], ADE20k semantic segmentation [167] (mIoU) linear probing, and NYU depth estimation [123] (RMSE) with a DPT head [109]. For each model, we report both its best layer and last layer performance. Across the board, $\mathrm{PE}_{\mathrm{spatial}}$ outperforms other state-of-the-art spatial models, with its best features being much better aligned to the last layer than the $\mathrm{PE}_{\mathrm{core}}$ it started from. Notably, SigLIP2, which during pretraining combines spatial, captioning, and contrastive losses [138], is not aligned well to the last layer in comparison.

End-to-End Finetuning Detection and Segmentation. In Tab. 14, we compare $\mathrm{PE}_{\mathrm{core}}$ and $\mathrm{PE}_{\mathrm{spatial}}$ with other popular vision encoders in the standard full-finetuning ViTDet [72] Mask-RCNN [43] setting using COCO [76] and LVIS [41] as benchmarks. In this controlled experiment, $\mathrm{PE}_{\mathrm{spatial}}$ is state-of-the-art among various vision backbones. This is significant, as contrastive encoders (especially large ones like MetaCLIP-G [152]) usually perform very poorly on detection, with smaller models often performing better. Typically, encoders only scale for detection if using spatial pretraining or a significant amount of detection data [98] is used to align them directly to downstream tasks. In contrast, $\mathrm{PE}_{\mathrm{spatial}}$ uses no detection data for alignment, making it general.

System-Level Detection. In Tab. 15, we provide a system-level end-to-end finetuning comparison vs. the absolute state-of-the-art in COCO detection. 
With only Object365 [120] as extra detection data, $\mathrm{PE}_{\mathrm{spatial}}$ can match the performance of more complex models tuned for detection, while only using a simple DETR-style decoder [12, 99]. $\mathrm{PE}_{\mathrm{spatial}}$ marks the first general, contrastively pretrained model to accomplish this. + +Table 14 End-to-End Finetuning Detection and Segmentation using Mask R-CNN [43] and VitDet [72] in a controlled setting. Details in Appendix B.3.4. + +
EncoderParamsDetectorCOCO APbox
SwinV2-G [80]3.0BHTC++ [14]62.5
Swin-L [79]0.3BDINO [161]63.2
EVA02-L [35]0.3BCascade [11]64.1
InternImage-G [145]3.0BDINO [161]65.3
EVA02-L [35]0.3BCoDETR [169]65.9
PEspatialG1.9BDETA [99]66.0
+ +Table 15 System-Level Comparison on Detection. Comparing to the leading results on COCO [76] val2017. See Appendix B.3.5 for training recipe. + +# 6 Related Work + +Learning vision-semantic representations has long been the leading approach for developing foundational models in perception. By aligning visual and textual representations, these models excel not only in vision tasks such as zero-shot image classification and image-text retrieval [51, 106, 117], open-vocabulary detection [63, 94, 95] and segmentation [22, 28], but also serve as the basis for multi-modal large language models (MLLMs) [3, 5, 78, 93, 101, 134]. + +Contrastive Language-Image Pretraining. The early works of Virtex [27], ICMLM [115], and ConViRT [163] developed the techniques for learning through contrastive objectives between vision and language modalities. Subsequently, vision encoders such as CLIP [51, 106] and ALIGN [54] scaled these techniques to much larger datasets and model sizes, popularizing vision-language contrastive learning. A series of open-weight contrastive models have been developed to enhance the performance and robustness of CLIP [33, 71, 117, 129, 152, 160]. For instance, SigLIP [160] replaces the traditional softmax with a sigmoid function in contrastive learning, while FLIP [74] employs masking techniques to expedite the training process. We are among this effort and build a state-of-the-art open Perception Encoder (PE) (§2.1). Other objectives that have proven useful for building visual encoders include captioning loss, which learns to predict image descriptions using a language model decoder and transfers well to downstream multi-modal language modeling tasks [37, 137]. Many works are now attempting to combine two or more objectives to address different downstream tasks through pretraining with multiple objectives [37, 158] or training sequentially [19, 66]. + +Efficient Training. Various axes of efficient training of clip models have been explored. 
BASIC [102] and LAION [117] explored scaling the batch size up to 160K, and showed the benefits of large batch sizes during training. EVA-CLIP [130] uses the LAMB optimizer [156] for large batch training of CLIP models. Rotary positional embedding (RoPE) [127] has been successfully adopted in large language models. For vision transformers, [2, 48] adopted 2D rotary positional embeddings. For the data engine, a series of works focus on large-scale sourcing and filtering through efficient data curation [33, 39, 117, 152] and explore recaptioning training images using MLLMs or VLMs [32, 64, 96, 151]. We extend these concepts to build a video data engine and scale our model to function as one strong model for both image and video (§2.2).

Best Embedding Layer Inside the Network. Typically, most vision encoders rely on the last layer to extract features for the task it is trained on. However, when trained on proxy or self-supervised tasks, the last layer is often not the ideal candidate for other tasks [8, 15, 16, 30, 85, 107, 121, 128, 142, 159, 166]. For example, when using image colorization as a pretraining objective, [162, 166] showed that the middle layers were better at image classification compared to the last layers. Subsequently, in iGPT [15], when trained for next token prediction, intermediate layers performed better at image classification. AIMv1 [30] also showed similar behavior for image-based next token prediction with patch normalized MSE loss. Toto [107] showed this can be extended for next token prediction in videos, and intermediate layers are best for image classification, video classification, tracking and robotics. REPA [159] showed this behavior for image generation models, where the intermediate layers of SiT [85] have better linear probing accuracy compared to earlier or later layers. In CLIP models, CLIPer [128] identified that early layers in CLIP possess good spatial understanding. 
In contrast to these lines of work, in this paper, we first show this behavior is not limited to one class of encoders. Specifically, we show this behavior exists in a spatially self-supervised model [98], a generative captioning model [37], and also in our own PE. Then we study this behavior for the PE encoder in depth, and show it is possible for CLIP training to produce rich spatial and semantic features in intermediate layers (§3).

Alignment Tuning. We explore alignment tuning for language (§4) and for spatial understanding (§5). For language alignment, we focus on adapting to multimodal large language models (MLLMs); for spatial alignment, we employ self-distillation of the model's own features combined with a teacher for locality. In MLLM literature, midtraining—i.e., a middle stage of training used to exploit large-scale multimodal data—has been actively studied. LLaVA-OneVision [66], the InternVL series [18, 19], the QwenVL series [3, 144], and several other leading MLLMs [82, 132] adopt this paradigm. Our $\mathrm{PE}_{\mathrm{lang}}$ can be seen as a variant of midtraining, but with one critical difference in principle: our goal is not to build the best MLLM, but to make the vision encoder the most general. Throughout §4, we benchmark our $\mathrm{PE}_{\mathrm{lang}}$ across different language models, input resolution, on various tasks for image and video to show this generality. For spatial tasks, we utilize the hidden embeddings

in the intermediate layers. Recently, several works showed the effectiveness of distilling a teacher model via representation alignment with cosine similarity. REPA [159] distilled early-layer features of DINO for image diffusion models, and RADIO [110] used multi-teacher distillation (DINO, CLIP and SAM). The key idea is to borrow the semantic understanding (e.g., CLIP) and spatial understanding (e.g., SAM, DINO) of pretrained vision encoders. 
In our $\mathrm{PE}_{\mathrm{spatial}}$ , we exploit the intermediate features of $\mathrm{PE}_{\mathrm{core}}$ for semantics, and a novel way to use SAM for spatial understanding. + +# 7 Conclusion + +We have presented Perception Encoders (PE), a family of best-in-class foundation models comprising $\mathrm{PE}_{\mathrm{core}}$ , $\mathrm{PE}_{\mathrm{lang}}$ , and $\mathrm{PE}_{\mathrm{spatial}}$ . We have shown that $\mathrm{PE}_{\mathrm{core}}$ can outperform models trained with WebLI and JFT-3B, which were previously the undisputed leaders in zero-shot image recognition, while also excelling in zero-shot video recognition. We have demonstrated that $\mathrm{PE}_{\mathrm{lang}}$ can be used to build a multimodal language model [21] that is at the forefront of the field in terms of performance. We have established that $\mathrm{PE}_{\mathrm{spatial}}$ can match the long-standing state-of-the-art in object detection with a significantly simpler decoder. Throughout all of this, one conclusion is abundantly clear: Perception Encoder unlocks the potential to scale simple contrastive vision-language pretraining to address a wide range of downstream vision tasks. + +Additional Contributors and Acknowledgments. We would like to thank Abhimanyu Dubey, Adel Ahmadyan, Andrew Westbury, Arkabandhu Chowdhury, Azita Shokrpour, Babak Damavandi, Chay Ryali, Cyprien de Lichy, Didac Suris Coll-Vinent, Dong Wang, Filip Radenovic, George Orlin, Han Zou, Harry Tran, Jitendra Malik, Joelle Pineau, Joseph Greer, Kavya Srinet, Kirmani Ahmed, Laura Gustafson, Lu Zhang, Muhammad Maaz, Natalia Neverova, Nicolas Carion, Oleksandr Maksymets, Ramya Raghavendra, Romy Luo, Ronghang Hu, Sam Doud, Sasha Mitts, Sean Bell, Shane Moon, Shuming Hu, Soerian Lieve, Stephane Kasriel, Valentin Gabeur, Vanessa Stark, Vignesh Ramanathan, Vivian Lee, Xuan Hu, Yang Li, and Ziyang Wang for their contributions and support for the project. And we thank you, the reader, for reading this far. 
+ +# A Video Data Engine + +# A.1 Video Caption + +# LLM Summarization prompt + +# LLM Summarization prompt 72 tokens + +Create a concise caption of a video using the provided metadata, video caption, and frame captions. + +TASK: Extract key information from the captions and combine it into an alt text format using single phrase or set of phrases that includes all relevant details. + +Steps to Follow: + +1. Review the metadata (title and description) for general context, you can rely it for entity names but do not rely on it as the primary source of information for your caption. +2 . Blend title / description with video caption and frame captions for the main storyline +3. Extract the most relevant and concise information. +4. Combine extracted information into a alt text format using short phrase or set of phrases with approximately 120 tokens, considering special characters like comma as part of the token count. +5. Prioritize including all key information over sentence structure or grammar. +6. Minimize the use of special characters and focus of key information. + +What to Avoid: + +- Avoid adding or inferring information not present in the original metadata and captions. +- Avoid using complex sentence structures or prioritizing sentence flow. + +Create a concise caption of the video based on the metadata, video caption, and frame captions. + +# A.2 PE Video Dataset Details + +PE Video is a dataset that we collected and curated from a licensed data source. The videos are high-resolution and high-quality with a focus on motion. The total number of videos is 1M. Among these, 120K videos have human-refined video captions, and we selected 15K from the 120K videos as a benchmark. 
+ +# A.2.1 Video Data Filtering Pipeline + +The goal of video data filtering is to identify videos that contain motions such as object motion, camera motion, interaction between objects, human actions, sequences of actions, and manipulation of objects, while rejecting videos with static scenes, like landscapes, or those that are artificial or highly edited. + +To achieve this, we created a video filtering pipeline consisting of the following steps: + +Step1: Compute motion features. For each video, we compute a list of features from video frames, including frames per second (fps), number of frames, number of I-frames, motion vector magnitude, and motion vector variance, using off-the-shelf tools like OpenCV [10]. +Step 2: Extract video frame features. For each video, we uniformly sample three frames and encode them using a DINOv2 model [98] and a SigLIP model [160]. +Step 3: LLM Features. For each video, we also run a multimodal large language model (LLM) like LlamaOnevision QwenLM 2 0.5B [66] to extract MLLM features. We composed a list of 26 questions and performed MLLM inference on the videos. The questions can be found here in §A.2.2. +Step 4: Video Quality Scoring. We combine all the features collected so far and use a random forest model to predict a score between 0 and 5. To train the model, we manually annotated approximately 1,000 videos with scores between 0 and 5. A low score indicates that the video is almost static and can be nearly summarized by a single frame, while a high score indicates that there are multiple temporal events in the video, requiring several frames to accurately caption it. We use these annotated videos as training data to fit a random forest model for video quality score prediction. +Step 5: We apply k-means clustering to the videos and rank them within each cluster. By selecting the top-ranked videos from each cluster, we effectively reduce the number of duplicated videos in the final dataset. 
+ +# A.2.2 LLM Feature Extraction + +# LLM Feature extraction question list + +Is the camera capturing the scene static? Reply yes or no. + +Is the camera capturing the scene moving? Reply yes or no. + +Is the video capturing a landscape? Reply yes or no. + +Is the video capturing a static scene? Reply yes or no. + +Is the scene captured from a distance? Reply yes or no. + +Is the video captured with a drone? Reply yes or no. + +Is the video computer-generated? Reply yes or no. + +Is the video content abstract? Reply yes or no. + +Is there something moving through the scene? Reply yes or no. + +Is there someone doing something in the video? Reply yes or no. + +Are there several things moving in the video? Reply yes or no. + +Is there an object that is being manipulated? Reply yes or no. + +Are there animals in the video? Reply yes or no. + +Is the scene mostly static? Reply yes or no. + +Are things occluding each other in this video? Reply yes or no. + +Is there something obstructing the view apart from the watermark? Reply yes or no. + +Is there a large number of things in the video? Reply yes or no. + +Are there more than 5 different objects in the video? Reply yes or no. + +Is it hard to keep track of some entities because they are moving so much? Reply yes or no. + +Is someone looking at a phone, a tablet or a computer screen? Reply yes or no. + +Are they looking at a phone, a tablet or a computer screen during the whole video? Reply yes or no. + +Are there several moving persons in this video? Reply yes or no. + +Are there several moving animals in this video? Reply yes or no. + +Are there several objects in this video? Reply yes or no. + +Are there several similar-looking objects in the video? Reply yes or no. + +Do they look similar? Reply yes or no. + +We use LLaVA-OneVision [78] model to extract LLM features from the videos. For each video, we prompt with 26 different questions to extract features ranging from, "is the video a landscape video?" 
to, "are there any moving objects in the video?" The features are then used by a random forest model to determine the video quality score. + +# A.2.3 PVD Benchmark Distribution + +
CategoryNumber of videosAvg. Caption Length
Hand Actions214354.2
Object Interactions186442.6
Food Preparation169156.8
Work Activities168947.8
Outdoor Scenes155850.7
Animals142350.9
Water Scenes133744.6
Object Handling130751.6
Close-up Shots112245.1
Nature Scenes86638.4
+ +Table 16 PVD Benchmark Statistics. We created a dataset of 15K videos together with human-verified captions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes. + +![](images/f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg) +Category: Hand Actions + +Caption: The video captures a closeup shot of person typing on a keyboard. The camera moves from the left side of the keyboard to the right, an animation of the revolving globe and some numbers can be seen in the frame and the video ends. + +![](images/6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg) +Category: Object Interactions + +Caption: The video shows a black and white spiral that is spinning. The spiral is made up of alternating black and white stripes that are evenly spaced and symmetrical. + +![](images/5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg) +Category: Food Preparation + +Caption: The video shows a person cutting an green color item into small pieces. They are using a knife to slice the pickle into thin pieces, and then chopping those pieces into smaller cubes. The person is working on a wooden cutting board, and the Hands are visible from the left side of the frame with pink nail paint on their nails. + +![](images/7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg) +Category: Work Activities + +Caption: The video shows a person using a shovel to clean the ashes from a fireplace. They are scooping up the ashes and removing them from the fireplace. + +![](images/a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg) +Category: Outdoor Scenes + +Caption: The video shows a tall, pointed structure in the middle of a field. and the structure is surrounded by trees and other vegetation. The field is divided into sections, with some areas covered in green grass and others covered in white material. 
The video shows the structure and the field from a distance, with the camera moving around it. + +![](images/e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg) +Category: Animals + +Caption: The video shows a white and gray adult cat and two kittens. The adult cat is grooming the kitten closest to it with its tongue, and the kitten is looking around. A hand reaches out from the frame's upper left to pet the two kittens. + +![](images/319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg) +Category: Water Scenes + +Caption: The video shows a large school of fish swimming in a water body towards the right frame. The camera too pans a little to the right. + +![](images/de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg) +Category: Object Handling + +Caption: The video shows a person putting a bowl of something into an oven. The person then closes the oven door. The background is blurry. + +![](images/84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg) +Category: Close-up Shots + +Caption: The video shows a white counter with two brown buckets and a yellow bucket. Then a person's right hand wearing a green glove enters the frame from top right side and place a yellow flower near to yellow watering can. The person then places the flower, in front of the buckets and exits the frame. In the background is a brown wall, and the camera is static throughout the clip. + +![](images/7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg) +Category: Nature Scenes +Figure 18 More PE Video Dataset Examples. For each of the ten categories, we randomly pick one video and show its video caption. The captions were generated by our video data pipeline and then refined by human annotators. + +Caption: The video shows a pile of branches and leaves on fire in a field. The fire is burning brightly, with flames licking at the edges of the pile. 
The smoke from the fire rises into the air, billowing up into the sky.

# B Implementation Details

# B.1 PE Core

We provide additional implementation details for building $\mathrm{PE}_{\mathrm{core}}$ . Our implementation is based on OpenCLIP5.

# B.1.1 Architecture and Training Setups

Model Architecture. Following CLIP, $\mathrm{PE}_{\mathrm{core}}$ comprises a Transformer-based [141] vision and a text encoder. We employ customized Transformer configurations as detailed in Tab. 17. For pooling, we use an attention pooling block in the style of SigLIP [160] with 8 heads from the last-layer feature to construct image and video embeddings. Regarding positional embedding, we use 2D RoPE [127] for relative positional embeddings and 2D learnable absolute positional embeddings (abs) the same size as the model's input resolution. We interpolate positional embeddings to enable support for various resolutions beyond the default. The text context length is 72 for G-scale and 32 for B and L-scale models. Originally a bug, we find it optimal to not disable the class token when using attention pooling for smaller models. Thus, the B and L models use a class token, then the attention pooling layer probes all features at once (class token included). Finally, we use an input mean and standard deviation of $(0.5,0.5,0.5)$ for simplicity.
ScaleTowerParamsWidthDepthMLPHeadsCLIP DimPoolingPositional EmbeddingResolution & Context LenPatch SizeClass Token Register
BVision0.09B768123072121024Attn PoolRoPE+Abs22416
Text0.31B102424409616EOS TokenAbs32--
LVision0.32B1024244096161024Attn PoolRoPE+Abs33614
Text0.31B102424409616EOS TokenAbs32--
GVision1.88B1536508960161280Attn PoolRoPE+Abs44814
Text0.47B128024512020EOS TokenAbs72--
+ +PE Core Training. As discussed in §2.4, the training of $\mathrm{PE}_{\mathrm{core}}$ involves three stages: 1) image pretraining; 2) image and video finetuning; and 3) an additional model distillation for smaller models. These three stages work together to develop a robust and effective $\mathrm{PE}_{\mathrm{core}}$ model. + +We first provide training recipes for 1) image pretraining in Tab. 18 and 2) video finetuning in Tab. 19. + +Table 17 PE Model Configurations with full details. + +
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate2e-3
batch size131,072
warm-up steps2K
training steps443K (B, L) / 656K (G)
data quantity5.4B
samples seen58B (B, L) / 86B (G)
max logit scale100
mask reg ratio0.4
mask reg batch8192
progressive res112-160-224 (B)
98-154-224-336 (L)
98-154-224-336-448 (G)
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
+ +
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size4096
warm-up steps2K
training steps5.4K
data quantity22M
samples seen22M
max logit scale100
number of frames8
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
+ +
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size16384
warm-up steps2K
training steps269K
data quantity5.4B
samples seen4.4B
max logit scale100
teacher logit scale200 (§C.3)
data augNone
+ +Table 20 Distillation. + +Table 19 Video Finetuning. + +Table 18 Image Pretraining. + +After training the largest G-scale model, we train the smaller models with image pretraining, then distill with image distillation in Tab. 20, then finally apply video finetuning at the end. + +# B.1.2 Zero-Shot Classification and Retrieval + +Zero-Shot Evaluation on Images and Videos. We use CLIPBench for zero-shot classification and retrieval benchmarking. The benchmark datasets and splits are obtained from the original dataset websites or HuggingFace. We extend the CLIPBench zero-shot evaluation to include video datasets such as MSR-VTT and Kinetics, and will release our model checkpoints, evaluation code, and scripts for reproducibility. + +Prompt Design. For zero-shot image-text and video-text retrieval, we rely solely on the original captions without any additional prompts. In contrast, for zero-shot classification, we utilize task-specific prompts graciously provided by the InternVL [19] authors. All additional prompts will be released. + +For example, we employ specific prompts for zero-shot image classification on various ImageNet benchmarks (e.g., ImageNet val, ImageNet v2) and video classification on Kinetics datasets (e.g., K400, K600, K700). + +# Zero-Shot Image Classification Prompts - ImageNet + +a bad photo of a $\{\mathbf{c}\}$ . a photo of many $\{\mathbf{c}\}$ . a sculpture of a $\{\mathbf{c}\}$ . a photo of the hard to see $\{\mathbf{c}\}$ . a low resolution photo of the $\{\mathbf{c}\}$ . a rendering of a $\{\mathbf{c}\}$ . graffiti of a $\{\mathbf{c}\}$ . a bad photo of the $\{\mathbf{c}\}$ . a cropped photo of the $\{\mathbf{c}\}$ . a tattoo of a $\{\mathbf{c}\}$ . the embroidered $\{\mathbf{c}\}$ . a photo of a hard to see $\{\mathbf{c}\}$ . a bright photo of a $\{\mathbf{c}\}$ . a photo of a clean $\{\mathbf{c}\}$ . a photo of a dirty $\{\mathbf{c}\}$ . a dark photo of the $\{\mathbf{c}\}$ . a drawing of a $\{\mathbf{c}\}$ . 
a photo of my $\{\mathbf{c}\}$ . the plastic $\{\mathbf{c}\}$ . a photo of the cool $\{\mathbf{c}\}$ . a close-up photo of a $\{\mathbf{c}\}$ . a black and white photo of the $\{\mathbf{c}\}$ . a painting of the $\{\mathbf{c}\}$ . a painting of a $\{\mathbf{c}\}$ . a pixelated photo of the $\{\mathbf{c}\}$ . a sculpture of the $\{\mathbf{c}\}$ . a bright photo of the $\{\mathbf{c}\}$ . a cropped photo of a $\{\mathbf{c}\}$ . a plastic $\{\mathbf{c}\}$ . a photo of the dirty $\{\mathbf{c}\}$ . aJPEG corrupted photo of a $\{\mathbf{c}\}$ . a blurry photo of the $\{\mathbf{c}\}$ . a photo of the $\{\mathbf{c}\}$ . a good photo of the $\{\mathbf{c}\}$ . a rendering of the $\{\mathbf{c}\}$ . a $\{\mathbf{c}\}$ in a video game. a photo of one $\{\mathbf{c}\}$ . a doodle of a $\{\mathbf{c}\}$ . a close-up photo of the $\{\mathbf{c}\}$ . a photo of a $\{\mathbf{c}\}$ . the origami $\{\mathbf{c}\}$ . the $\{\mathbf{c}\}$ in a video game. a sketch of a $\{\mathbf{c}\}$ . a doodle of the $\{\mathbf{c}\}$ . a origami $\{\mathbf{c}\}$ . a low resolution photo of a $\{\mathbf{c}\}$ . the toy $\{\mathbf{c}\}$ . a rendition of the $\{\mathbf{c}\}$ . a photo of the clean $\{\mathbf{c}\}$ . a photo of a large $\{\mathbf{c}\}$ . a rendition of a $\{\mathbf{c}\}$ . a photo of a nice $\{\mathbf{c}\}$ . a photo of a weird $\{\mathbf{c}\}$ . a blurry photo of a $\{\mathbf{c}\}$ . a cartoon $\{\mathbf{c}\}$ . art of a $\{\mathbf{c}\}$ . a sketch of the $\{\mathbf{c}\}$ . a embroidered $\{\mathbf{c}\}$ . a pixelated photo of a $\{\mathbf{c}\}$ . itap of the $\{\mathbf{c}\}$ . a JPEG corrupted photo of the $\{\mathbf{c}\}$ . a good photo of a $\{\mathbf{c}\}$ . a plushie $\{\mathbf{c}\}$ . a photo of the nice $\{\mathbf{c}\}$ . a photo of the small $\{\mathbf{c}\}$ . a photo of the weird $\{\mathbf{c}\}$ . the cartoon $\{\mathbf{c}\}$ . art of the $\{\mathbf{c}\}$ . a drawing of the $\{\mathbf{c}\}$ . a photo of the large $\{\mathbf{c}\}$ . a black and white photo of a $\{\mathbf{c}\}$ . 
the plushie $\{\mathbf{c}\}$ . a dark photo of a $\{\mathbf{c}\}$ . itap of a $\{\mathbf{c}\}$ . graffiti of the $\{\mathbf{c}\}$ . a toy $\{\mathbf{c}\}.$ itap of my $\{\mathbf{c}\}.$ a photo of a cool $\{\mathbf{c}\}.$ a photo of a small $\{\mathbf{c}\}.$ a tattoo of the $\{\mathbf{c}\}.$ + +# Zero-Shot Video Classification Prompts - Kinetics + +a photo of $\{\mathbf{c}\}$ . a photo of a person $\{\mathbf{c}\}$ . a photo of a person using $\{\mathbf{c}\}$ . a photo of a person doing $\{\mathbf{c}\}$ . a photo of a person during $\{\mathbf{c}\}$ . a photo of a person performing $\{\mathbf{c}\}$ . a photo of a person practicing $\{\mathbf{c}\}$ . a video of $\{\mathbf{c}\}$ . a video of a person using $\{\mathbf{c}\}$ . a video of a person doing $\{\mathbf{c}\}$ . a video of a person during $\{\mathbf{c}\}$ . a video of a person performing $\{\mathbf{c}\}$ . a video of a person practicing $\{\mathbf{c}\}$ . a example of $\{\mathbf{c}\}$ . a example of a person $\{\mathbf{c}\}$ . a example of a person using $\{\mathbf{c}\}$ . a example of a person doing $\{\mathbf{c}\}$ . a example of a person during $\{\mathbf{c}\}$ . a example of a person performing $\{\mathbf{c}\}$ . a example of a person practicing $\{\mathbf{c}\}$ . a demonstration of $\{\mathbf{c}\}$ . a demonstration of a person $\{\mathbf{c}\}$ . a demonstration of a person using $\{\mathbf{c}\}$ . a demonstration of a person doing $\{\mathbf{c}\}$ . a demonstration of a person during $\{\mathbf{c}\}$ . a demonstration of a person performing $\{\mathbf{c}\}$ . + +Evaluation Method. Several works use different input transformations for different datasets when evaluating zero-shot performance (e.g., [33, 130, 138, 160]). To be as fair as possible, we follow [130] in evaluating with two transformations—center crop and non aspect ratio preserving resize ("squash")—and report the max between the two for all models and all datasets we evaluate. 
Additionally, ObjectNet has a red border around every image to facilitate deduplication, which we remove for evaluation. Finally, we follow [19] in using retrieval reweighting (DSL), applying the softmax score distribution to the similarities used for retrieval: 
+
+$$
+\text{scores} = \text{scores} * \text{softmax}(\text{scores}, \text{dim} = 0) \tag{1}
+$$
+
+This slightly improves retrieval for most models, so we do it for all models we evaluate for fairness. Notably, we were able to reproduce the reported numbers for most papers with these techniques, but for cases where we could not, we default to the reported number. 
+
+# B.2 PE: Language Alignment 
+
+We provide details of the MLLM experimental setup in $\S 4$ . We describe data, model, and training separately. 
+
+Data. Our MLLM training contains warmup data and supervised finetuning (SFT) data. Our warmup data is a 1M subset of image-text pairs from our $\mathrm{PE}_{\mathrm{core}}$ pretraining dataset. For SFT data, we use a diverse data 
+
+mix consisting of 2.6M unique samples. This dataset is composed of $1.7\mathrm{M}^7$ visual QA samples from the Cauldron [65], 0.5M grounded QA pairs from Visual Genome [60], Flickr-Entities [103] and Densely Captioned Images [139], 0.1M image-captioning pairs from COCO [76] and 0.3M text-only samples. This comprehensive data mix allows us to thoroughly assess our model's capabilities in various MLLM tasks. 
+
+Model. As described in § 4.1, we use a simple vision-language model architecture where a vision encoder and a pretrained decoder-only LLM are connected by a vision projector. For all tables, we use either Llama3.1-instruct 8B or QwenLM 2.5-instruct 7B as a language model, and a 2-layer MLP as a vision projector. For fair comparison, we use the native resolution for image input. 
During inference, we evaluate the models on video tasks in a zero-shot manner: We concatenate all video frames into a sequence and feed them to the language model, without seeing video samples during SFT. For all video tasks, we use 8 frames with the same native resolution of height and width. For $\mathrm{PE}_{\mathrm{core}}$ and $\mathrm{PE}_{\mathrm{lang}}$ , this makes $448 \times 448 \times 8$ input and $32 \times 32 \times 8$ vision tokens. 
+
+Training. MLLM training consists of warmup and supervised finetuning (SFT) stages. In both stages, we freeze the vision encoder and train the vision projector and LLM. During the warmup stage, we use a global batch size of 128 with a learning rate of $1 \times 10^{-4}$ . We gradually increase the learning rate from $1 \times 10^{-6}$ to $1 \times 10^{-4}$ over 120 steps, and follow a cosine learning rate decay schedule to train a total of 8,000 steps. During the SFT stage, we use a global batch size of 256 with a learning rate of $1 \times 10^{-5}$ . Similar to the warmup, we gradually increase the learning rate from $1 \times 10^{-7}$ to $1 \times 10^{-5}$ over 300 steps, and follow a cosine learning rate decay schedule to train a total of 12.5K steps. We truncate text-sequences longer than 2,048 tokens on top of the visual tokens. This makes the maximum sequence length (num. vision tokens) + 2,048. With $448 \times 448$ input resolution and patch size of 14, we set the maximum sequence length to $1,024 + 2,048 = 3,072$ . To represent bounding boxes on the output side for image grounding tasks, we simply use text tokens to represent each bounding box: each coordinate is normalized between 000 and 999, in “[x, y, x, y]” box format for top-left and bottom-right corners (e.g., [012, 122, 633, 782]). 
+
+For all baselines, we search for the best intermediate layer features to adapt to the LLM. 
We search over $\{-1, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -40\}$ layers (counting from last) and report the best result averaged over OCR/Chart/Document Q&A, Visual Q&A, Image Captioning and Video Understanding. 
+
+# B.3 PE: Spatial Alignment 
+
+# B.3.1 Training Details 
+
+Loss Functions. For self-aligning to frozen $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ layer 41 features ( $L_{\mathrm{core}}$ ), we minimize cosine similarity: 
+
+$$
+L_{\mathrm{core}} = \frac{1}{n_{\mathrm{tok}}} \sum \left(\frac{\left(S_{50}\right) \left(T_{41}\right)^{T}}{\left\| S_{50} \right\| \cdot \left\| T_{41} \right\|}\right) \tag{2}
+$$
+
+where $S_{50}$ denotes the last layer features of the student, $T_{41}$ denotes frozen layer 41 features from $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ , and $n_{\mathrm{tok}}$ represents the number of tokens. Note that we chose 41 fairly arbitrarily (it is layer 40 when written with indexing from 0). Judging by Fig. 8, any layer around 40 should work (and 39 may be slightly better). 
+
+For the encouraging locality loss $(L_{\mathrm{loc}})$ , we compute the pairwise cosine similarity between a model's own tokens and itself. This forms a "spatial correspondence map" for what tokens should be considered similar. We then compute the same for the student, and minimize the difference between the two with MSE loss: 
+
+$$
+L_{\mathrm{loc}} = \frac{1}{n_{\mathrm{tok}}^{2}} \sum \left(\frac{(S_{50}) (S_{50})^{T}}{\| S_{50} \|^{2}} - \frac{(T_{\mathrm{SAM}}) (T_{\mathrm{SAM}})^{T}}{\| T_{\mathrm{SAM}} \|^{2}}\right)^{2} \tag{3}
+$$
+
+where $T_{\mathrm{SAM}}$ denotes the "SAM Mask Logits" constructed in §5.2. We also find using a temperature $(t)$ on the SAM teacher's pairwise cosine similarity term $(x)$ useful: $e^{t(x - 1)}$ . The full loss is $L_{\mathrm{spatial}} = L_{\mathrm{core}} + L_{\mathrm{loc}}$ . 
+
+Hyperparameters. In Tab. 
21 we show the training hyperparameters for spatial alignment, finetuned on top of the initial $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ checkpoint. Then in Tab. 22 and Tab. 23, we show the settings for the two teachers and losses. Note that when running the teachers, we run them on the exact same image as the student (same data + +aug and all). Additionally, because the SAM 2.1 teacher operates at a resolution of 1024, we upsample the image, generate the mask logits, and then downsample the result. Both teachers are frozen. + +
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate5e-4
batch size12,288
warm-up steps0
training steps24K
data quantity5.4B (PEcore PT Data)
samples seen300M
resolution448
mask ratio0.75
mask size2×2 tokens
droppath0.4
layerscale0.1
aspect jitter ar(0.75,1.33)
data augcolor jitter j(0.32,0,0.32,0)
hflip p(0.5)
+ +Table 21 Spatial Alignment. + +
configvalues
modelSAM 2.1-L
layermask logits
resolution1024 (interp→448)
lossEq. 3
loss weight1
temperature20
sample points32 × 32 (1024)
pred iou threshold0
stability score threshold0
mask threshold0
+ +Table 22 SAM 2.1 Teacher. + +
configvalues
modelPEcoreG
layer41
resolution448
lossEq. 2
loss weight1
+ +Table 23 PEcoreG Teacher. + +# B.3.2 Visualization Method + +To visualize the features in Fig. 17 and Fig. 20, our goal is to map a 1536-dimensional space down to 3 dimensions to view how the model encodes each token in relation to each other. One naive approach would be to apply PCA with 3 dimensions across all token in the image. However, we find this alone can be misleading. + +Specifically, if the model has rich semantics, it should be the case that most of those 1536 features have some useful information in them. Some of that information could be spatially contiguous, some of it not. We want PCA to only select the spatially contiguous information, since we are trying to evaluate the spatial quality of the features. However, naively applying PCA will not necessarily do that, especially for models with information aggregated in "global tokens" (§5.1). Despite these tokens carrying important information, they are not spatially contiguous. Thus, if PCA dedicates a large portion of its 3 dimensions to global tokens, the features will look like their spatial quality is bad, despite the features containing good spatial information. + +So, how do we select for only the spatially contiguous information to visualize? The answer is simple: by definition, the spatially contiguous information will be... spatially contiguous. To keep the spatially contiguous information while lowering the impact of the global tokens, we can simply apply a low pass filter to the features (specifically, a gaussian blur with kernel size 3 and a $\sigma$ of 1). To retain the detail of the original features, we can average the two together. Thus, to visualize features, we use the 3D PCA of the of the following. $x$ denotes the model's output features, and $g(x)$ denotes gaussian blur. + +$$ +0. 5 x + 0. 5 g (x, k = 3, \sigma = 1) \tag {4} +$$ + +We show the impact of this in Fig. 19. Blurring the features make them appear more detailed! 
In reality, that information was always there; PCA just did not show it. Thus, great care must be taken when visualizing high-dimensional feature spaces. If they were easy to map to 3 dimensions—you wouldn't need 1536 of them! 
+
+![](images/64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg) 
+Figure 19 Feature Visualization Ablation. With raw features (top row), PCA misses spatially contiguous parts of the feature space and instead focuses on global tokens (which carry information but are not spatially coherent). By applying a simple low pass filter (bottom row), we can reveal spatial information that PCA originally missed (see column 2: with raw features, the background looks like a mess, with the low pass filter the tiles become visible). 
+
+Then, to map the PCA dimensions to RGB pixel values, we map each PCA component to a corresponding channel in LCh color space, then convert those LCh colors to RGB to get the final image. Note that we use LCh instead of RGB directly for aesthetic reasons, and also because LCh is a cylindrical color space—where smooth changes to the values look like smooth changes in colors to humans—and thus is easier to discern. 
+
+# B.3.3 Frozen Feature Dense Prediction 
+
+We discuss the detailed settings of the results for dense prediction with frozen features in Tab. 13. Each model is evaluated with its native resolution up to 448 or 448 (whichever is optimal). 
+
+Zero-Shot Tracking. We evaluate our pretrained models on the label propagation task using the protocols in [52, 107] on the DAVIS dataset [104]. This evaluation does not require any finetuning or probing, and therefore preserves the spatial features in the model. Following Toto [107], we use the features from the last $n = 7$ frames to find the nearest neighbor patch in the current frame, and then propagate the masks from the previous frames to the current frame. Note that this evaluation method does not require any training. 
+
+Semantic Segmentation. 
For semantic segmentation, we evaluate our pretrained models on the ADE20K [167] semantic segmentation task. We use a linear layer and a convolutional layer to map intermediate spatial features to segmentation masks following [98]. The models are evaluated and then features are resized to $518 \times 518$ . We only use features from a single layer. The probing layers are finetuned with AdamW [83] with a learning rate of 0.001. 
+
+Depth Estimation. For depth estimation on NYUv2 [123], we follow [75, 98]. We use a DPT-head [109] on top of our frozen pretrained model and use only single-layer features. We scale the size of the DPT-head for each model based on the hidden size for each architecture. Because NYU is a small dataset and the models we evaluate are large, we observe the results for most models are noisy and prone to overfitting. Thus, for fair comparison we train all models for 20 epochs and for all models take the lowest validation loss over all epochs. 
+
+Frozen Detection. For the frozen feature detection results presented in §3, we evaluated using Mask R-CNN [43] as a probe. We used a resolution of 1024 for Fig. 8 and 768 for the remaining experiments in §3. Because the backbones were frozen, we did not add any global attention and instead simply tiled the input image with a window size of 32 for the 1024px experiments and 24 for the 768px experiments. All models were interpolated to patch 16. Finally, the backbones were frozen and only the FPN and R-CNN heads were trained for 15 epochs on COCO with a stepwise decay LR without drop path. 
+
+# B.3.4 End-to-End Finetuning Detection and Segmentation 
+
+We provide a detailed discussion of the settings for end-to-end finetuning on detection and segmentation presented in Tab. 14. The hyperparameters can be found in Tab. 24. 
We find that the default 100-epoch protocol in ViTDet [72, 149] causes overfitting problems in COCO experiments especially for billion-level parameter vision encoders, so we tune the training epochs, learning rate, drop path and learning rate decay accordingly. + +The LVIS experiment setting is the same as COCO except all L-size models use learning rate of 2e-4 and all g-size and G-size models use 75 epochs. + +
configvaluesmodellrepochsdrop pathlr decaylayersglobal window indexwindow size
optimizerAdamWOpenAI CLIP-L1e-41000.40.824(5, 11, 17, 23)14
optimizer momentum(0.9, 0.999)MetaCLIP-L1e-41000.40.824(5, 11, 17, 23)14
weight decay0.1MetaCLIP-G5e-5750.50.948(11, 23, 35, 47)14
learning rateSigLIP-so1e-41000.40.827(2, 10, 18, 26)14
learning rate scheduleStep-wise decayEVA02-L1e-41000.40.824(5, 11, 17, 23)14
learning rate decayMAE-L1e-41000.40.824(5, 11, 17, 23)14
batch size64SigLIP2-so1e-41000.40.827(2, 10, 18, 26)14
image size1024 × 1024SigLIP2-g5e-5750.50.940(9, 19, 29, 39)14
augmentationLSJ [0.1, 2.0]DINOv2-L1e-41000.40.824(5, 11, 17, 23)32
epochsDINOv2-g5e-5360.50.940(9, 19, 29, 39)32
drop pathPEcoreG5e-5750.50.950(12, 24, 36, 49)32
positional embeddingabswin [7]PEspatialG5e-5360.50.950(12, 24, 36, 49)32
patch size16
window size
global window index
+ +Table 24 Settings for End-to-End Finetuning Detection and Segmentation. + +# B.3.5 System-Level Comparison on Detection + +We describe our implementation for system-level comparison to the state-of-the-arts on COCO object detection in Tab 15. Our implementation is based on the DETA repository8. We replace the vision encoder with our $\mathrm{PE}_{\mathrm{spatial}}$ and maintain the same hyperparameters as in the end-to-end finetuning settings, while keeping the detector unchanged. The training process consists of three stages: + +
Test-Time AugAPbox
No TTA65.2
+ More Queries65.3
+ SoftNMS [6]65.8
+ Flip Aug65.8
+ Multiscale Aug66.0
+ +1. Initial Training: Train on Objects365 for 12 epochs with an image resolution of $1024 \times 1024$ , a total batch size of 256, and a learning rate of 2e-4, which is divided by 10 at the 10th epoch. +2. Increasing Resolution: Continue training on Objects365 for 6 epochs with a resolution of $1536 \times 1536$ , a total batch size of 128, and a learning rate of 5e-5, which is divided by 10 at the 5th epoch. +3. Finetuning: Finetune on COCO dataset for 12 epochs with an image resolution of $1728 \times 1728$ , a total batch size of 64, and a learning rate of 5e-5, which is divided by 10 at the 8th epoch. +4. Further Increasing Resolution: Further finetune on COCO dataset for 3 epochs with a resolution of $1824 \times 1824$ , a total batch size of 64. To save GPU memory, we use SGD optimizer instead of Adam, with a learning rate of 5e-3, which is divided by 10 at the 2th epoch. + +We apply a series of test-time augmentation techniques to further improve the performance, detailed in Tab. 25. + +# C Additional Results + +# C.1 PEcore: Robust Image Pretraining + +In Tab. 26, we present the raw data for the robustness metrics in Fig. 2. Across the board, each change improved almost all metrics (with the exception of progressive resolution slightly hurting the average and mask regularization slightly hurting ImageNet Adversarial). The fact that there were no tradeoffs to these changes, indicate that their improvements to the features are general. This could be why most of these changes improved performance for downstream tasks as well. + +Note that in §2.1, we only discuss changes that we know to work. There are several changes that we have tried that do not work (i.e., do not improve performance or lower performance). For instance: average pooling instead of using a class token, increasing the text tower size, using hue or contrast jitter, and maintaining the same resolution throughout training but dropping tokens instead of progressive resolution (FLIP-style). 
+ +We also find increasing batch size and increasing training iterations for an L scale model to have equivalent effects. This is in contrast to the batch size scaling observed by [160], but it is possible that this difference is down to a hyperparameter issue. + +Table 25 Test-Time Aug for system-level comparison on COCO in Tab. 15. + +
StepZero-Shot Classification
Avg Class.ImageNet w1/2[26]ImageNet v2[12]ObjectNet IN Classes [4]ImageNet Adversarial [47]ImageNet Reminims [46]ImageNet Sketch [143]
1Baseline75.378.971.973.768.391.167.8
2Progressive Resolution75.178.971.872.469.990.567.0
3High Batch Size76.279.572.874.171.891.068.1
4LAMB and High LR76.979.973.374.373.591.568.6
5High Resolution (336)78.380.473.875.679.292.068.8
62D RoPE79.280.774.177.480.992.769.4
7Attention Pooling80.181.074.878.482.993.469.9
8Data Augmentation80.881.175.280.883.193.571.2
9Mask Regularization80.981.375.380.982.893.871.2
+ +Table 26 Robust Image Pretraining Full Results. Raw results for the robustness metrics metrics in Fig. 2. Almost every change improves every metric, but some metrics are improved more than others (e.g., ObjectNet and ImageNet-A). + +# C.2 $\mathsf{PE}_{\mathrm{core}}$ : Video Data Scaling + +
Video Data SizeAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet w1 [20]ImageNet v2 [112]ObjectNet In Classes [4]ImageNet adversarial [47]MS-COCO ts→img [76]MS-COCO img→ts [76]MS-COCO ts→img [76]Average VideoKeritics 400 [55]Keritics 600 [55]Keritics 700 [55]UCF 101 [126]HMDB 51 [62]MSR-VTT ts→vid [153]MSR-VTT vid→ts [153]
0M77.083.978.686.690.352.170.357.070.369.461.678.547.440.531.4
3M77.784.178.886.690.953.374.261.672.472.264.288.553.842.837.6
6M78.084.279.086.791.154.072.763.673.573.466.088.954.644.943.6
8M78.484.279.287.091.654.973.664.874.574.567.789.555.346.945.5
11M78.684.279.287.291.855.473.865.275.175.067.689.755.647.745.8
14M78.884.279.287.591.955.774.365.575.475.367.989.955.847.846.3
17M78.984.279.287.792.055.874.365.875.775.568.290.256.048.346.7
+ +The detailed video data scaling results are presented in Tab. 27. Our experiments demonstrate that increasing the number of synthetic video data generated by the proposed video data engine enhances the performance of classification and retrieval on both image and video benchmarks. On image benchmarks, while improvements on ImageNet val and v2 plateaued earlier compared to ObjectNet and ImageNet Adversarial, MS-COCO retrieval performance continued to show gains. On video benchmarks, scaling synthetic video data consistently yields better performance for both classification and retrieval tasks. We expect that further scaling up the video data with our video data engine will continue to drive performance improvements. + +# C.3 $\mathsf{PE}_{\mathrm{core}}$ : Smaller Models + +Table 27 Scaling Video Data. Increasing the number of synthetic video data generated by our proposed video data engine consistently enhances the performance of image and video classification and retrieval tasks. + +
ModelTeacher's TempModel ScaleZero-Shot Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet JV Classes [4]ImageNet Adversarial [47]ImageNet Renditions [46]ImageNet Sketch [143]
vanilla pretrained model-B66.274.267.462.550.283.059.8
distillation×2B65.271.865.561.450.283.658.6
×1B68.074.968.164.754.185.361.1
×0.7B68.275.168.265.354.485.161.3
×0.5B68.375.268.265.354.285.261.4
+ +Table 28 Ablation Study on Teacher's Distribution Temperature. We evaluate the effect of varying temperatures on the teacher's distribution, using a pretrained vanilla CLIP model (ViT-B/14, resolution 224) as a baseline (details in §2.1). The models are finetuned via distillation with a short schedule of 50K steps. + +Ablation: Distillation Temperature. To optimize the performance of smaller models (B and L-scales in Tab. 4), we utilize a distillation finetuning approach with $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ as the teacher model. During this process, both student and teacher models encode image and text inputs to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence loss on both image-to-text and text-to-image similarity distributions. + +We find that using a fixed and smaller temperature (i.e., higher logit scale), which controls the range of logits in the softmax, significantly enhances the effectiveness of distillation. This results in a sharper distribution for the teacher's distributions. In contrast, the student's temperature remains learnable, consistent with our pretraining procedure and CLIP training. + +In Tab. 28, we present an ablation study examining the impact of temperature on the teacher's distribution. For this analysis, we utilize a pretrained vanilla CLIP model (ViT-B/14, resolution 224), which serves as a baseline for comparison (see §2.1 for details). The models are finetuned using distillation with a concise schedule of 50K steps. Notably, our results show that employing a smaller temperature for the teacher's distributions yields improved performance on zero-shot ImageNet benchmarks. + +Building strong smaller models. In Tab. 29, we demonstrate our step-by-step training strategy for building strong smaller models at the L scale, as discussed in §2.4. 
Specifically, we outline our approach to image pretraining, image distillation, and video finetuning, and distillation. Leveraging the robust foundation established by our + +
ModelStageImage Zero-ShotVideo Zero-Shot
Average ImageImageNetv1 [26]ImageNetv2 [112]ObjectNetIN Classes [4]ImageNetAdversarial [47]MS-COCOv1→v1img [76]MS-COCOimg→v1img [76]Average VideoKinetics400 [55]Kinetics600 [53]Kinetics700 [55]UCF101 [126]HMDB 51 [62]MS-RVTTv1→v1v1d [153]MS-RVTTv1→v1v1d [153]
SigLIP2-L/16 [138]-76.083.177.484.484.355.371.456.265.362.556.886.749.341.531.4
PEcoreLimage pretraining75.182.976.881.885.653.070.459.068.067.758.585.557.742.033.4
PEcoreL+image distillation from PEcoreG77.683.678.184.488.956.074.764.573.072.664.886.558.047.948.4
PEcoreL+video finetuning78.083.577.984.789.057.175.965.373.472.765.387.158.550.350.1
+ +pretraining techniques (§2.1), we show that distilling from $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ , our strongest unified perception encoder, yields improvements on both image and video benchmarks. Furthermore, a short-scheduled video finetuning provides an additional boost in performance on both benchmarks. + +# C.4 $\mathsf{PE}_{\mathrm{lang}}$ : Additional Results + +Analogous to Tab. 10, in Tab. 30, we compare $\mathrm{PE}_{\mathrm{core}}$ and $\mathrm{PE}_{\mathrm{lang}}$ with dynamic resolution setting [77, 82]. More specifically, we use up to 4 tiles, following after a thumbnail, which is a whole image resized into $448 \times 448$ . With the maximum number of tiles of 4, the model can cover $\{1 \times 1, 1 \times 2, 1 \times 3, 1 \times 4, 2 \times 1, 2 \times 2, 3 \times 1, 4 \times 1\}$ tile ratios. Similar to the Tab. 10, 11, 12 in the main paper, we show that $\mathrm{PE}_{\mathrm{lang}}$ largely outperforms the baseline vision encoders by large margins across all categories of MLLM tasks. Note that $\mathrm{PE}_{\mathrm{lang}}$ has been alignment-tuned with native resolution input, as opposed to e.g., InternViT 2.5, which has been midtrained with dynamic tiling, which shows $\mathrm{PE}_{\mathrm{lang}}$ 's strong generality for different input formats. + +Next, in Tab. 31, 32, 33, we show the breakdowns of RefCOCO/+/g [56] with Llama 3.1-instruct 8B as language model, Qwen2.5 LM 7B as language model, and with Llama 3.1-instruct 8B and dynamic tiling $(4 + 1)$ , respectively. In our SFT data, we have VisualGenome [60], DCI [139], and Flickr30K [103] as grounding datasets, and RefCOCO/+/g are unseen. We therefore report zero-shot performance of the MLLMs to evaluate spatial understanding capability of the vision encoders. Overall, $\mathrm{PE}_{\mathrm{lang}}$ L or G show the best performance across all RefCOCO splits, except with Qwen2.5 LM. 
This is because (1) InternViT 2.5 6B is midtrained with Qwen2 LM, and (2) during pre/mid-training the training data of RefCOCO/+/g are seen. + +Table 29 Building Strong Smaller Models. This table illustrates the step-by-step process of developing the $\mathrm{PE}_{\mathrm{core}}\mathrm{L}336\mathrm{px}$ model, as outlined in §2.4. Starting with the pretrained $\mathrm{PE}_{\mathrm{core}}\mathrm{L}$ , both image distillation, along with video finetuning, enhance performance across image and video benchmarks, resulting in a unified L-scale model. + +
ModelEncoder ParamsResolution Patch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. Ground. RetCOCOg+ [58]Avg. VideoVdeoMME Acc [38]STAR Acc [148]TCIF-QA Acc [53]EgoSchema Acc [89]MVBench Acc [68]PerceptionTest Acc [105]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1461.871.162.540.273.374.665.364.988.579.8113.490.4133.5116.267.148.044.847.162.739.046.048.3
MetaCLIP-G [152]1.8B224/1460.368.161.339.172.874.965.465.988.280.1114.291.8134.4116.566.049.046.546.562.545.044.748.9
PElang G†1.7B*224/1470.279.879.147.574.676.070.664.388.380.6116.392.0136.4120.569.556.649.055.969.961.250.053.6
576 Tokens per Tile
CLIP [106]0.3B336/1469.676.878.250.372.976.371.864.988.080.4114.090.9134.4116.668.550.846.652.265.044.646.349.9
AIMv2-L [37]0.3B336/1466.774.174.945.272.477.473.565.689.081.7116.492.5137.1119.566.654.143.454.370.656.047.352.7
SigLIP2-so [138]0.4B384/1655.561.454.933.372.376.570.166.088.681.2118.095.8138.3119.866.554.344.952.866.858.649.653.3
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1477.582.188.561.877.479.780.266.489.882.5120.397.4140.2123.271.959.849.462.774.164.053.155.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1656.966.056.534.370.976.469.966.288.481.2117.894.7137.8120.967.846.247.044.966.739.234.545.1
PEcoreL0.3B448/1467.172.478.346.471.276.474.063.788.879.0113.991.5134.5115.762.951.447.051.262.749.647.850.1
PElang L0.3B448/1478.382.889.365.275.978.578.864.489.681.3117.894.7138.1120.771.656.547.057.268.059.852.354.7
AIMv2 3B [37]2.7B448/1467.573.078.246.572.278.879.266.288.381.7119.095.8139.7121.565.154.049.655.467.349.649.952.5
InternViT2.5 6B [18]5.5B448/1467.474.674.347.672.975.971.364.887.779.7110.485.3132.5113.556.852.046.049.665.050.649.651.3
PEcoreG1.9B448/1468.073.481.247.669.776.474.362.589.179.6113.091.6134.5112.967.653.246.054.367.051.248.752.0
PElang G1.7B*448/1478.681.889.867.875.080.382.366.789.682.8119.695.2140.3123.471.859.049.661.873.960.052.656.3
+ +Table 30 4+1 Tile Llama 8B MLLM Results. Llama 3.1-instruct 8B [82] is used as a language model. ${}^{*}\mathrm{PE}_{\mathrm{lang}}$ has 1.7B parameters since we discard the last 3 layers during language alignment. All MLLMs are trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of $448\times 448$ (or the corresponding resolution for each encoder). The image tiles follow after a thumbnail input, similar to prior work [77]. Evaluation on an model that was interpolated without additional training (i.e., zero-shot resolution). + +
ModelEncoder ParamsResolution Path SizeAvg. Ground.
RefCOCO val/ [56]RefCOCO testA [56]RefCOCO val/ [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1460.663.656.767.554.158.948.867.267.8
MetaCLIP-G [152]1.8B224/1460.562.056.567.853.558.749.268.268.3
PEiang G†1.7B*224/1465.767.764.470.958.362.056.673.274.4
576 Tokens per Image
CLIP [106]0.3B336/1465.066.761.471.657.662.554.573.272.8
AIMv2-L [37]0.3B336/1463.365.461.669.655.060.052.071.171.5
AIMv2-L Dist. [37]0.3B336/1462.664.861.069.454.459.051.370.870.0
SigLIP2-so [138]0.4B384/1667.468.866.571.060.361.858.576.276.0
SigLIP2-g-opt [138]1.1B384/1666.567.966.170.158.861.757.175.575.0
PEiang G†1.7B*336/1468.969.867.573.261.564.060.877.377.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1466.969.366.772.658.363.157.274.274.0
SigLIP2-so [138]0.4B512/1669.671.469.274.461.364.860.377.977.2
PEcore L0.3B448/1459.761.755.366.953.158.848.068.567.5
PEiang L0.3B448/1470.571.870.273.063.766.162.778.878.9
DINOv2 [98]1.1B448/1464.967.262.570.557.061.054.573.173.1
AIMv2 3B [37]2.7B448/1436.137.634.140.732.736.232.036.938.6
InternViT2.5 6B [18]5.5B448/1468.070.267.672.260.664.058.775.375.2
PEcore G1.9B448/1466.668.364.472.358.762.756.075.175.0
PEiang G1.7B*448/1471.371.969.975.164.267.363.079.479.2
+ +Table 31 Llama MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used for zeroshot RefCOCO/+/g grounding. + +
ModelEncoder ParamsResolutionPatch SizeAvg. Ground.
RefCOCO var[56]RefCOCO texA[56]RefCOCO var[56]RefCOCO+ texA[56]RefCOCO+ var[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1670.073.673.074.360.962.759.978.477.2
SigLIP2-g-opt [138]1.1B384/1669.973.372.473.660.562.360.778.478.2
PEiangG†1.7B*336/1470.173.472.075.362.064.261.278.477.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1468.172.469.174.159.362.456.675.275.5
SigLIP2-so [138]0.4B512/1670.574.173.774.461.762.961.078.677.9
PEcoreL0.3B448/1466.570.467.871.557.761.156.275.875.3
PEiangL0.3B448/1470.474.472.674.662.264.062.079.078.7
DINOv2 [98]1.1B448/1469.373.471.173.960.063.959.076.476.7
AIMv2 3B [37]2.7B448/1467.671.467.772.359.261.256.376.476.4
InternViT2.5 6B‡ [18]5.5B448/1472.877.776.577.163.666.062.280.079.5
PEcoreG1.9B448/1470.574.071.875.861.564.860.178.577.3
PElangG1.7B*448/1472.175.472.976.364.265.962.979.779.7
+ +Table 32 Qwen MLLM-Based Zeroshot RefCOCO. QwenLM 2.5 7B [155] is used as a language model. All MLLMs report zeroshot results on RefCOCO/+/g datasets. $\ddagger$ Trained with RefCOCO/+/g beforehand. + +
ModelEncoder ParamsResolutionAvg. Ground.Grounding
RefCOCORefCOCORefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+
val [56]val [56]val [56]val [56]val [56]val [56]val [56]val [56]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1467.169.365.073.260.564.956.574.373.4
MetaCLIP-G [152]1.8B224/1466.067.963.271.959.262.955.873.873.1
PElang G†1.7B*224/1470.371.669.673.763.366.262.678.678.2
576 Tokens per Tile
CLIP [106]0.3B336/1468.570.766.674.161.165.958.176.075.1
AIMv2-L [37]0.3B336/1466.668.465.571.459.363.456.574.274.2
SigLIP2-so [138]0.4B384/1666.567.966.170.158.861.757.175.575.0
SigLIP2-g-opt [138]1.1B384/1666.568.265.670.159.062.358.074.874.0
PElang G†1.7B*336/1471.973.671.574.964.867.363.980.480.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1667.869.267.871.259.962.559.076.976.0
PEcoreL0.3B448/1462.965.359.969.256.662.252.070.170.0
PElang L0.3B448/1471.673.070.874.365.267.262.979.779.7
AIMv2 3B [37]2.7B448/1465.166.962.971.158.162.455.671.872.2
InternViT2.5 6B‡ [18]5.5B448/1456.861.056.465.851.057.046.158.058.9
PEcoreG1.9B448/1467.669.265.872.459.964.158.375.175.6
PElang G1.7B*448/1471.872.670.774.664.866.664.680.480.3
+ +Table 33 4+1 Tile Llama 8B MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used as a language model. All trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of the encoder's native resolution, with a thumbnail image in front, similar to prior work [77]. ${}^{ \ddagger }$ Trained with RefCOCO/+/g beforehand. + +# C.5 PEspatial: Additional Qualitative Results + +![](images/a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg) +Figure 20 More Visualizations of the feature space following Fig. 17. After the image itself, column 1 is $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ last layer features, column 2 is $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ aligned to its own layer 41, column 3 is $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ aligned to SAM 2.1-L [111] mask logits, and column 4 is $\mathrm{PE}_{\mathrm{core}}\mathrm{G}$ aligned to both, denoted $\mathrm{PE}_{\mathrm{spatial}}\mathrm{G}$ . See §B.3.2 for visualization method. + +![](images/775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg) + +# References + +[1] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In ICCV, 2019. 14, 15, 16, 32 +[2] Pravesh Agrawal, Szymon Antoniak, Emma Bou Hanna, Baptiste Bout, Devendra Chaplot, Jessica Chudnovsky, Diogo Costa, Baudouin De Monicault, Saurabh Garg, Theophile Gervet, Soham Ghosh, Amélie Héliou, Paul Jacob, Albert Q. 
Jiang, Kartik Khandelwal, Timothee Lacroix, Guillaume Lample, Diego Las Casas, Thibaut Lavril, Teven Le Scao, Andy Lo, William Marshall, Louis Martin, Arthur Mensch, Pavankumar Muddireddy, Valera Nemychnikova, Marie Pellat, Patrick Von Platen, Nikhil Raghuraman, Baptiste Rozière, Alexandre Sablayrolles, Lucile Saulnier, Romain Sauvestre, Wendy Shang, Roman Soletskyi, Lawrence Stewart, Pierre Stock, Joachim Studnia, Sandeep Subramanian, Sagar Vaze, Thomas Wang, and Sophia Yang. Pixtral 12b. arXiv:2410.07073, 2024. 20 +[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv:2308.12966, 2023. 20 +[4] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. In NeurIPS, 2019. 3, 4, 6, 8, 9, 10, 30, 31, 32 +[5] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, Thomas Unterthiner, Daniel Keysers, Skanda Koppula, Fangyu Liu, Adam Grycner, Alexey A. Gritsenko, Neil Houlsby, Manoj Kumar, Keran Rong, Julian Eisenschlos, Rishabh Kabra, Matthias Bauer, Matko Bosnjak, Xi Chen, Matthias Minderer, Paul Voigtlaender, Ioana Bica, Ivana Balazevic, Joan Puigcerver, Pinelopi Papalampidi, Olivier J. Henaff, Xi Xiong, Radu Soricut, Jeremiah Harmsen, and Xiaohua Zhai. PaliGemma: A versatile 3b VLM for transfer. arXiv:2407.07726, 2024. 20 +[6] Navaneeth Bodla, Bharat Singh, Rama Chellappa, and Larry S Davis. Soft-NMS-Improving object detection with one line of code. In ICCV, 2017. 30 +[7] Daniel Bolya, Chaitanya Ryali, Judy Hoffman, and Christoph Feichtenhofer. Window attention is bugged: how not to interpolate position embeddings. 
In *ICLR*, 2023. 11, 29 +[8] Florian Bordes, Randall Balestriero, Quentin Garrido, Adrien Bardes, and Pascal Vincent. Guillotine regularization: Why removing layers is needed to improve generalization in self-supervised learning. arXiv:2206.13378, 2022. 20 +[9] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - Mining discriminative components with random forests. In ECCV, 2014. 9 +[10] Gary Bradski. The OpenCV library. Dr. Dobb's Journal: Software Tools for the Professional Programmer, 2000. 22 +[11] Zhaowei Cai and Nuno Vasconcelos. Cascade R-CNN: Delving into high quality object detection. In CVPR, 2018. 19 +[12] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020. 19 +[13] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D. Manning. AuroraCap: Efficient, performant video detailed captioning and a new benchmark. In ICLR, 2025. 5 +[14] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. Hybrid task cascade for instance segmentation. In CVPR, 2019. 19 +[15] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pretraining from pixels. In ICML, 2020. 20 +[16] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020. 
20 + +[17] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, Alexander Kolesnikov, Joan Puigcerver, Nan Ding, Keran Rong, Hassan Akbari, Gaurav Mishra, Linting Xue, Ashish Thapliyal, James Bradbury, Weicheng Kuo, Mojtaba Seyedhosseini, Chao Jia, Burcu Karagol Ayan, Carlos Riquelme, Andreas Steiner, Anelia Angelova, Xiaohua Zhai, Neil Houlsby, and Radu Soricut. Pali: A jointly-scaled multilingual language-image model. In ICLR, 2023. 8, 9 +[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, Lixin Gu, Xuehui Wang, Qingyun Li, Yimin Ren, Zixuan Chen, Jiapeng Luo, Jiahao Wang, Tan Jiang, Bo Wang, Conghui He, Botian Shi, Xingcheng Zhang, Han Lv, Yi Wang, Wenqi Shao, Pei Chu, Zhongying Tu, Tong He, Zhiyong Wu, Huipeng Deng, Jiaye Ge, Kai Chen, Kaipeng Zhang, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv:2412.05271, 2024. 11, 15, 16, 20, 32, 33 +[19] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyuan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024. 1, 6, 7, 9, 10, 20, 26 +[20] Gong Cheng, Junwei Han, and Xiaoqiang Lu. Remote sensing image scene classification: Benchmark and state of the art. Proceedings of the IEEE, 2017. 
9 +[21] Jang Hyun Cho, Andrea Madotto, Effrosyni Mavroudi, Triantafyllos Afouras, Tushar Nagarajan, Muhammad Maaz, Yale Song, Tengyu Ma, Shuming Hu, Hanoona Rasheed, Peize Sun, Po-Yao Huang, Daniel Bolya, Suyog Jain, Miguel Martin, Huiyu Wang, Nikhila Ravi, Shashank Jain, Temmy Stark, Shane Moon, Babak Damavandi, Vivian Lee, Andrew Westbury, Salman Khan, Philipp Krahenbuhl, Piotr Dólar, Lorenzo Torresani, Kristen Grauman, and Christoph Feichtenhofer. Perceptionlm: Open-access data and models for detailed visual understanding. arXiv:2504.13180, 2025. 2, 5, 11, 14, 15, 16, 21 +[22] Seokju Cho, Heeseong Shin, Sunghwan Hong, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. CAT-Seg: Cost aggregation for open-vocabulary semantic segmentation. In CVPR, 2024. 20 +[23] Timothee Darcet, Maxime Oquab, Julien Mairal, and Piotr Bojanowski. Vision transformers need registers. In ICLR, 2024. 12, 17 +[24] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, Rodolphe Jenatton, Lucas Beyer, Michael Tschannen, Anurag Arnab, Xiao Wang, Carlos Riquelme, Matthias Minderer, Joan Puigcerver, Utku Evci, Manoj Kumar, Sjoerd van Steenkiste, Gamaleldin F. Elsayed, Aravindh Mahendran, Fisher Yu, Avital Oliver, Fantine Huot, Jasmijn Bastings, Mark Patrick Collier, Alexey Gritsenko, Vighnesh Birodkar, Cristina Vasconcelos, Yi Tay, Thomas Mensink, Alexander Kolesnikov, Filip Pavetic, Dustin Tran, Thomas Kipf, Mario Lučić, Xiaohua Zhai, Daniel Keysers, Jeremiah Harmsen, and Neil Houlsby. Scaling vision transformers to 22 billion parameters. In ICML, 2023. 
1, 9 +[25] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weihs, Noah A. Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv:2409.17146, 2024. 16 +[26] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3, 6, 8, 9, 10, 30, 31, 32 +[27] Karan Desai and Justin Johnson. VirTex: Learning visual representations from textual annotations. In CVPR, 2021. 20 +[28] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 20 +[29] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 1, 8, 9 + +[30] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pre-training of large autoregressive image models. In ICML, 2024. 20 +[31] David Fan, Shengbang Tong, Jiachen Zhu, Koustuv Sinha, Zhuang Liu, Xinlei Chen, Michael Rabbat, Nicolas Ballas, Yann LeCun, Amir Bar, and Saining Xie. 
Scaling language-free visual representation learning. arXiv:2504.01017, 2025. 12, 13 +[32] Lijie Fan, Dilip Krishnan, Phillip Isola, Dina Katabi, and Yonglong Tian. Improving CLIP training with language rewrites. In NeurIPS, 2023. 20 +[33] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. In ICLR, 2024. 1, 3, 9, 16, 20, 26 +[34] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA: Exploring the limits of masked visual representation learning at scale. In CVPR, 2023. 1 +[35] Yuxin Fang, Quan Sun, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA-02: A visual representation for neon genesis. Image and Vision Computing, 2024. 1, 19 +[36] Christoph Feichtenhofer. X3D: Expanding architectures for efficient video recognition. In CVPR, 2020. 4 +[37] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T. Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders. In CVPR, 2025. 1, 2, 10, 11, 15, 16, 19, 20, 32, 33 +[38] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-MME: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv:2405.21075, 2024. 
14, 15, 16, 32 +[39] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, Eyal Orgad, Rahim Entezari, Giannis Daras, Sarah Pratt, Vivek Ramanujan, Yonatan Bitton, Kalyani Marathe, Stephen Mussmann, Richard Vencu, Mehdi Cherti, Ranjay Krishna, Pang Wei Koh, Olga Saukh, Alexander Ratner, Shuran Song, Hannaneh Hajishirzi, Ali Farhadi, Romain Beaumont, Sewoong Oh, Alex Dimakis, Jenia Jitsev, Yair Carmon, Vaishaal Shankar, and Ludwig Schmidt. DataComp: In search of the next generation of multimodal datasets. In NeurIPS, 2023. 10, 20 +[40] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017. 14, 15, 16, 32 +[41] Agrim Gupta, Piotr Dollar, and Ross Girshick. LVIS: A dataset for large vocabulary instance segmentation. In CVPR, 2019. 19 +[42] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1 +[43] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask R-CNN. In ICCV, 2017. 11, 12, 19, 29 +[44] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022. 1, 19 +[45] Greg Heinrich, Mike Ranzinger, Hongxu, Yin, Yao Lu, Jan Kautz, Andrew Tao, Bryan Catanzaro, and Pavlo Molchanov. RADIOv2.5: Improved baselines for agglomerative vision foundation models. In CVPR, 2025. 1, 10, 18 +[46] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In ICCV, 2021. 3, 8, 9, 30, 31 +[47] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. 
Natural adversarial examples. In CVPR, 2021. 3, 4, 8, 9, 30, 31, 32 +[48] Byeongho Heo, Song Park, Dongyoon Han, and Sangdoo Yun. Rotary position embedding for vision transformer. In ECCV, 2024. 20 + +[49] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. In NeurIPS Deep Learning Workshop, 2015. 8 +[50] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, 2016. 14, 17 +[51] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. OpenCLIP, 2021. 3, 20 +[52] Allan Jabri, Andrew Owens, and Alexei Efros. Space-time correspondence as a contrastive random walk. In NeurIPS, 2020. 11, 19, 29 +[53] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. TGIF-QA: Toward spatio-temporal reasoning in visual question answering. In CVPR, 2017. 14, 15, 16, 32 +[54] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 20 +[55] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset. arXiv:1705.06950, 2017. 6, 9, 31, 32 +[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 14, 15, 16, 32, 33 +[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 
14, 15, 16, 32 +[58] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dolkar, and Ross Girshick. Segment anything. In ICCV, 2023. 5, 18 +[59] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV Workshop, 2013. 9 +[60] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 2017. 27, 32 +[61] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012. 1 +[62] Hildegard Kuehne, Hueihan Jhuang, Estfbaliz Garrote, Tomaso Poggio, and Thomas Serre. HMDB: a large video database for human motion recognition. In ICCV, 2011. 9, 31, 32 +[63] Weicheng Kuo, Yin Cui, Xiuye Gu, A. J. Piergiovanni, and Anelia Angelova. F-VLM: open-vocabulary object detection upon frozen vision and language models. In ICLR, 2023. 20 +[64] Zhengfeng Lai, Haotian Zhang, Bowen Zhang, Wentao Wu, Haoping Bai, Aleksei Timofeev, Xianzhi Du, Zhe Gan, Jiulong Shan, Chen-Nee Chuah, Yinfei Yang, and Meng Cao. VeCLIP: Improving CLIP training via visual-enriched captions. In ECCV, 2024. 5, 20 +[65] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? In NeurIPS, 2024. 27 +[66] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. LLaVA-OneVision: Easy visual task transfer. TMLR, 2025. 16, 20, 22 +[67] Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In ICCV, 2023. 
9 +[68] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, Limin Wang, and Yu Qiao. MVBench: A comprehensive multi-modal video understanding benchmark. In CVPR, 2024. 14, 15, 16, 32 + +[69] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 1 +[70] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for CLIP training. In NeurIPS, 2023. 3 +[71] Xianhang Li, Zeyu Wang, and Cihang Xie. CLIPA-v2: Scaling CLIP training with 81.1% zero-shot imagenet accuracy within a $10,000 budget; an extra $4,000 unlocks 81.8% accuracy. arXiv:2306.15658, 2023. 3, 20 +[72] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. In ECCV, 2022. 11, 19, 29 +[73] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, 2023. 14, 15, 16, 32 +[74] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In CVPR, 2023. 20 +[75] Zhenyu Li, Xuyang Wang, Xianming Liu, and Junjun Jiang. Binsformer: Revisiting adaptive bins for monocular depth estimation. TIP, 2024. 29 +[76] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In ECCV, 2014. 2, 6, 9, 12, 14, 15, 16, 19, 27, 31, 32 +[77] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning,OCR, and world knowledge, 2024. 32, 33 +[78] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. NeurIPS, 2024. 
20, 23 +[79] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 3, 19 +[80] Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, and Baining Guo. Swin transformer v2: Scaling up capacity and resolution. In CVPR, 2022. 19 +[81] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In CVPR, 2022. 1 +[82] AI @ Meta Llama Team. The llama 3 herd of models. arXiv:2407.21783, 2024. 5, 14, 15, 16, 20, 32, 33 +[83] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. *ICLR*, 2019. 3, 29 +[84] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. CLIP4Clip: An empirical study of clip for end to end video clip retrieval. Neurocomputing, 2021. 6, 9 +[85] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. SiT: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In ECCV, 2024. 20 +[86] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-ChatGPT: Towards detailed video understanding via large vision and language models. In ACL, 2024. 5 +[87] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. VideoGPT+: Integrating image and video encoders for enhanced video understanding. arXiv:2406.09418, 2024. 5 +[88] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arxiv:1306.5151, 2013. 9 +[89] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. NeurIPS, 2024. 
14, 15, 16, 32 +[90] Kevis-Kokitsi Maninis, Kaifeng Chen, Soham Ghosh, Arjun Karpur, Koert Chen, Ye Xia, Bingyi Cao, Daniel Salz, Guangxing Han, Jan Dlabal, et al. Tips: Text-image pretraining with spatial awareness. arXiv:2410.16512, 2024. 1 +[91] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. DocVQA: A dataset for vqa on document images. In WACV, 2021. 14, 15, 16, 32 +[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographics. In WACV, 2022. 14, 15, 16, 32 + +[93] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, Anton Belyi, Haotian Zhang, Karanjeet Singh, Doug Kang, Ankur Jain, Hongyu He, Max Schwarzer, Tom Gunter, Xiang Kong, Aonan Zhang, Jianyu Wang, Chong Wang, Nan Du, Tao Lei, Sam Wiseman, Guoli Yin, Mark Lee, Zirui Wang, Ruoming Pang, Peter Grasch, Alexander Toshev, and Yinfei Yang. MM1: methods, analysis and insights from multimodal LLM pre-training. In ECCV, 2024. 20 +[94] Matthias Minderer, Alexey A. Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. Simple open-vocabulary object detection with vision transformers. In ECCV, 2022. 1, 20 +[95] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In NeurIPS, 2023. 20 +[96] Thao Nguyen, Samir Yitzhak Gadre, Gabriel Ilharco, Sewoong Oh, and Ludwig Schmidt. Improving multimodal datasets with image captioning. In NeurIPS, 2023. 5, 20 +[97] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In ICVGIP, 2008. 9 +[98] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy V. 
Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Mido Assran, Nicolas Ballas, Wojciech Galuba, Russell Howes, Po-Yao Huang, Shang-Wen Li, Ishan Misra, Michael Rabbat, Vasu Sharma, Gabriel Synnaeve, Hu Xu, Hervé Jégou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision. TMLR, 2024. 1, 2, 10, 11, 15, 16, 18, 19, 20, 22, 29, 33 +[99] Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, and Philipp Krahenbuhl. NMSstrikes back. arXiv:2212.06137, 2022. 19 +[100] Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In CVPR, 2012. 9 +[101] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. arXiv:2306.14824, 2023. 20 +[102] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Kenji Kawaguchi, Hanxiao Liu, Adams Wei Yu, Jiahui Yu, Yi-Ting Chen, Minh-Thang Luong, Yonghui Wu, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. Neurocomputing, 2023. 1, 9, 20 +[103] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV, 2015. 27, 32 +[104] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 DAVIS challenge on video object segmentation. arXiv:1704.00675, 2017. 19, 29 +[105] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adriâ Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, Carl Doersch, Tatiana Matejovicova, Yury Sulsky, Antoine Miech, Alex Frechette, Hanna Klimczak, Raphael Koster, Junlin Zhang, Stephanie Winkler, Yusuf Aytar, Simon Osindero, Dima Damen, Andrew Zisserman, and João Carreira. 
Perception test: A diagnostic benchmark for multimodal video models. In NeurIPS, 2024. 14, 15, 16, 32 +[106] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 3, 8, 9, 15, 16, 19, 20, 31, 32, 33 +[107] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pre-training from videos. arXiv:2501.05453, 2025. 19, 20, 29 +[108] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv:2204.06125, 2022. 1 +[109] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In ICCV, 2021, 11, 19, 29 +[110] Mike Ranzinger, Greg Heinrich, Jan Kautz, and Pavlo Molchanov. AM-RADIO: Agglomerative vision foundation model—reduce all domains into one. In CVPR, 2024. 1, 18, 21 + +[111] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. SAM 2: Segment anything in images and videos. In ICLR, 2024. 2, 5, 17, 18, 34 +[112] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet? In ICML, 2019. 3, 6, 8, 9, 30, 31, 32 +[113] William A. Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. The dollar street dataset: images representing the geographic and socioeconomic diversity of the world. In NeurIPS Datasets and Benchmarks, 2022. 
10 +[114] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2022. 1 +[115] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In ECCV, 2020. 20 +[116] Mert Bulent Sariyildiz, Philippe Weinzaepfel, Thomas Lucas, Diane Larlus, and Yannis Kalantidis. UNIC: Universal classification models via multi-teacher distillation. In ECCV, 2024. 18 +[117] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In NeurIPS Datasets and Benchmarks, 2022. 20 +[118] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-OKVQA: A benchmark for visual question answering using world knowledge. In ECCV, 2022. 14, 15, 16, 32 +[119] Jinghuan Shang, Karl Schmeckpeper, Brandon B May, Maria Vittoria Minniti, Tarik Kelestemur, David Watkins, and Laura Herlant. Theia: Distilling diverse vision foundation models for robot learning. In CoRL, 2024. 18 +[120] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In ICCV, 2019. 19 +[121] Shashank Shekhar, Florian Bordes, Pascal Vincent, and Ari Morcos. Objectives matter: Understanding the impact of self-supervised objectives on vision transformer representations. arXiv:2304.13089, 2023. 20 +[122] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In ECCV, 2020. 10 +[123] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. 
Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 19, 29 +[124] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015. 1 +[125] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards VQA models that can read. In CVPR, 2019. 14, 15, 16, 32 +[126] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv:1212.0402, 2012. 9, 31, 32 +[127] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. Neurocomputing, 2024. 4, 20, 25 +[128] Lin Sun, Jiale Cao, Jin Xie, Xiaoheng Jiang, and Yanwei Pang. CLIPer: Hierarchically improving spatial representation of CLIP for open-vocabulary semantic segmentation. arXiv:2411.13836, 2024. 20 +[129] Quan Sun, Yuxin Fang, Ledell Wu, Xinlong Wang, and Yue Cao. EVA-CLIP: Improved training techniques for clip at scale. arXiv:2303.15389, 2023. 20 +[130] Quan Sun, Jinsheng Wang, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, and Xinlong Wang. EVA-CLIP-18B: Scaling clip to 18 billion parameters. arXiv:2402.04252, 2024. 1, 9, 10, 20, 26 +[131] Mingxing Tan and Quoc Le. EfficientNet: Rethinking model scaling for convolutional neural networks. In ICML, 2019. 1, 3, 4 +[132] Gemma Team. Gemma 3 technical report. arXiv:2503.19786, 2025. 16, 20 + +[133] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100M: The new data in multimedia research. Communications of the ACM, 2016. 9 +[134] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Ziteng Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024. 
11, 20 +[135] Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In ICCV, 2021. 14, 17 +[136] Hugo Touvron, Matthieu Cord, and Hervé Jégou. DeiT III: Revenge of the ViT. In ECCV, 2022. 3 +[137] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. In NeurIPS, 2023. 1, 20 +[138] Michael Tschannen, Alexey Gritsenko, Xiao Wang, Muhammad Ferjad Naeem, Ibrahim Alabdulmohsin, Nikhil Parthasarathy, Talfan Evans, Lucas Beyer, Ye Xia, Basil Mustafa, Olivier Henaff, Jeremiah Harmsen, Andreas Steiner, and Xiaohua Zhai. SigLIP 2: Multilingual vision-language encoders with improved semantic understanding, localization, and dense features. arXiv:2502.14786, 2025. 2, 7, 8, 9, 10, 15, 16, 18, 19, 26, 32, 33 +[139] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating CLIP-style models on dense captions. In CVPR, 2024. 27, 32 +[140] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 10 +[141] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 25 +[142] Matthew Walmer, Saksham Suri, Kamal Gupta, and Abhinav Shrivastava. Teaching matters: Investigating the role of supervision in vision transformers. In CVPR, 2023. 20 +[143] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In NeurIPS, 2019. 
3, 8, 9, 30, 31 +[144] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv:2409.12191, 2024. 16, 20 +[145] Wenhai Wang, Jifeng Dai, Zhe Chen, Zhenhang Huang, Zhiqi Li, Xizhou Zhu, Xiaowei Hu, Tong Lu, Lewei Lu, Hongsheng Li, Xiaogang Wang, and Yu Qiao. InternImage: Exploring large-scale vision foundation models with deformable convolutions. In CVPR, 2023. 19 +[146] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, Tianxiang Jiang, Songze Li, Jilan Xu, Hongjie Zhang, Yifei Huang, Yu Qiao, Yali Wang, and Limin Wang. InternVideo2: Scaling foundation models for multimodal video understanding. In ECCV, 2024. 2, 9 +[147] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, 2022. 4, 17 +[148] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. STAR: A benchmark for situated reasoning in real-world videos. In NeurIPS, 2021. 14, 15, 16, 32 +[149] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detector2, 2019. 29 +[150] Jianxiong Xiao, Krista A. Ehinger, James Hays, Antonio Torralba, and Aude Oliva. SUN database: Exploring a large collection of scene categories. IJCV, 2014. 9 +[151] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen tau Yih, Shang-Wen Li, Saining Xie, and Christoph Feichtenhofer. Altogether: Image captioning via re-aligning alt-text. In EMNLP, 2024. 
5, 20 +[152] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. In ICLR, 2024. 1, 3, 8, 15, 19, 20, 32, 33 + +[153] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. MSR-VTT: A large video description dataset for bridging video and language. In CVPR, 2016. 6, 7, 31, 32 +[154] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arxiv:2407.10671, 2024. 16 +[155] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv:2412.15115, 2024. 16, 33 +[156] Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In ICLR, 2020. 
3, 20 +[157] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. TACL, 2014. 9, 14, 15, 16, 32 +[158] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. CoCa: Contrastive captioners are image-text foundation models. TMLR, 2022. 1, 9, 20 +[159] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. In ICLR, 2025, 20, 21 +[160] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1, 4, 7, 9, 16, 19, 20, 22, 25, 26, 30 +[161] Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M Ni, and Heung-Yeung Shum. DINO: DETR with improved denoising anchor boxes for end-to-end object detection. In ICLR, 2023. 19 +[162] Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, 2016. 20 +[163] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D. Manning, and Curtis P. Langlotz. Contrastive learning of medical visual representations from paired images and text. In MLHC, 2022. 20 +[164] Long Zhao, Nitesh Bharadwaj Gundavarapu, Liangzhe Yuan, Hao Zhou, Shen Yan, Jennifer J. Sun, Luke Friedman, Rui Qian, Tobias Weyand, Yue Zhao, Rachel Hornung, Florian Schroff, Ming Yang, David A. Ross, Huisheng Wang, Hartwig Adam, Mikhail Sirotenko, Ting Liu, and Boqing Gong. VideoPrism: A foundational visual encoder for video understanding. In ICML, 2024. 9 +[165] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. In WACV, 2025. 14, 15, 16, 32 +[166] Liang Zheng, Yali Zhao, Shengjin Wang, Jingdong Wang, and Qi Tian. Good practice in cnn feature transfer. arXiv:1604.00133, 2016. 
20 +[167] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In CVPR, 2017. 19, 29 +[168] Jinguo Zhu, Weiyun Wang, Zhe Chen, Zhaoyang Liu, Shenglong Ye, Lixin Gu, Yuchen Duan, Hao Tian, Weijie Su, Jie Shao, Zhangwei Gao, Erfei Cui, Yue Cao, Yangzhou Liu, Weiye Xu, Hao Li, Jiahao Wang, Han Lv, Dengnian Chen, Songze Li, Yinan He, Tan Jiang, Jiapeng Luo, Yi Wang, Conghui He, Botian Shi, Xingcheng Zhang, Wenqi Shao, Junjun He, Yingtong Xiong, Wenwen Qu, Peng Sun, Penglong Jiao, Lijun Wu, Kaipeng Zhang, Huipeng Deng, Jiaye Ge, Kai Chen, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. InternVL3: Exploring advanced training and test-time recipes for open-source multimodal models. arxiv:2504.10479, 2025. 2, 16 + +[169] Zhuofan Zong, Guanglu Song, and Yu Liu. DETRs with collaborative hybrid assignments training. In ICCV, 2023. 19 \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13181/images/0419cec4c68d3e21eb66b1cc748ebcb2ae14852eccf08775be3e31fa04f72c48.jpg b/data/2025/2504_13xxx/2504.13181/images/0419cec4c68d3e21eb66b1cc748ebcb2ae14852eccf08775be3e31fa04f72c48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff002baa5883a6cbc2d04238cb9fb0dfb9c8eeef --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/0419cec4c68d3e21eb66b1cc748ebcb2ae14852eccf08775be3e31fa04f72c48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c62adce01bb860e9b30a2a30136e2070426d78496e373b3703a2b03c9b3aa50 +size 10626 diff --git a/data/2025/2504_13xxx/2504.13181/images/053d59bc4e3905221084bc6f9359e600a47cf14faa2f757a8cbbbcf1cd8123f8.jpg b/data/2025/2504_13xxx/2504.13181/images/053d59bc4e3905221084bc6f9359e600a47cf14faa2f757a8cbbbcf1cd8123f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb133352f095e513614155c9d305784a7b4808ed --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/053d59bc4e3905221084bc6f9359e600a47cf14faa2f757a8cbbbcf1cd8123f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37939617d36b6efd9251da4afa6dc08ebb2d242cb674ed2fa35634f8a0342600 +size 8200 diff --git a/data/2025/2504_13xxx/2504.13181/images/054f467c3336982f3f223e3b29240981096ae34d8b34df2a325a937eacb719bb.jpg b/data/2025/2504_13xxx/2504.13181/images/054f467c3336982f3f223e3b29240981096ae34d8b34df2a325a937eacb719bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c7c10a32b6c4fa33058d1c8d6508fee3f88eee7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/054f467c3336982f3f223e3b29240981096ae34d8b34df2a325a937eacb719bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7eb369a52933986d1109e33fedeaed2086b725f2455c12d7694dd03c7e0bfeb +size 20103 diff --git a/data/2025/2504_13xxx/2504.13181/images/09866ed878a98816d1da5b6fe224d8be814c643218717faa1346ddba0c7367a8.jpg b/data/2025/2504_13xxx/2504.13181/images/09866ed878a98816d1da5b6fe224d8be814c643218717faa1346ddba0c7367a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e490cdf21ae9ef0c8908b28d513d51d0a96b41ac --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/09866ed878a98816d1da5b6fe224d8be814c643218717faa1346ddba0c7367a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:444fa1e72e85286a1c3552664c1a9d55bcaa0044bc24b19ab97f10bea3d955ae +size 38661 diff --git a/data/2025/2504_13xxx/2504.13181/images/0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg b/data/2025/2504_13xxx/2504.13181/images/0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3acfb841a506d7848958150b1423e55f3fe672bd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c77e097e3192f0ec2c894b98bc46271a204f281957d7cbe0b4e586d51d786f4f +size 54988 diff --git a/data/2025/2504_13xxx/2504.13181/images/0ec51d0218f040fa5ad324cfe27ac42cd2a57739eb907bcc101b1e408b010d84.jpg b/data/2025/2504_13xxx/2504.13181/images/0ec51d0218f040fa5ad324cfe27ac42cd2a57739eb907bcc101b1e408b010d84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eaa14d207817fb00f252e0ac8e0998f96876c603 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/0ec51d0218f040fa5ad324cfe27ac42cd2a57739eb907bcc101b1e408b010d84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4b45c0c53eb71699ec8deb26e317c64ead967cfbe0a897d2793e4d59b891780 +size 4299 diff --git a/data/2025/2504_13xxx/2504.13181/images/136f3d22e44c07b73f44f9a797639d42c616d674d5ecb888fb4f0d58cb59d6b2.jpg b/data/2025/2504_13xxx/2504.13181/images/136f3d22e44c07b73f44f9a797639d42c616d674d5ecb888fb4f0d58cb59d6b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e676cccb21fad81e6596269f2bfb1382b2bd217f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/136f3d22e44c07b73f44f9a797639d42c616d674d5ecb888fb4f0d58cb59d6b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7447b019842b6176b99aa9d0e06761265e5462a80f4b1391f6c1181fe864c3f4 +size 14477 diff --git a/data/2025/2504_13xxx/2504.13181/images/14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg b/data/2025/2504_13xxx/2504.13181/images/14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee7c4ad9b8f7411ab9c0a14cf380b7f1b250f1a7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ccee871cd1766ed03bb56e644e16497f446436100bc85fbbfc93932dd5c31f2 +size 9362 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg b/data/2025/2504_13xxx/2504.13181/images/16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4c09d8403f041afe2ee20129e0b6623e388d035 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9f0a1dcd516ffc5a22609be97659d9588d8aa167252912eb1d400d7595460f +size 31350 diff --git a/data/2025/2504_13xxx/2504.13181/images/18451512f5568fe8da5336fa21d737dcaf1979fc3235a99cb9187fc3c71d5477.jpg b/data/2025/2504_13xxx/2504.13181/images/18451512f5568fe8da5336fa21d737dcaf1979fc3235a99cb9187fc3c71d5477.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe7c7b6a75f2ba635016a24bca367ee58c71c47b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/18451512f5568fe8da5336fa21d737dcaf1979fc3235a99cb9187fc3c71d5477.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4630a00e32b0e8c7b18e7d934bd84609ec6c13c54e36064d4367086c5f6e5c62 +size 167033 diff --git a/data/2025/2504_13xxx/2504.13181/images/1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg b/data/2025/2504_13xxx/2504.13181/images/1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12df500bca9eb3948ab6d142e227674c09032d0f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce82984fa7e4ccf8c94cd53ede92c7edd7fcf5bbaf89681ba36998582b73a42d +size 12115 diff --git a/data/2025/2504_13xxx/2504.13181/images/1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg 
b/data/2025/2504_13xxx/2504.13181/images/1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce4e770d9e02acb9b848c01eae8f8fe66e7c124e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84b99c174efe53cfc40df4f230f1b04412d0236b72f16840609b490a29e8a45a +size 7875 diff --git a/data/2025/2504_13xxx/2504.13181/images/1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg b/data/2025/2504_13xxx/2504.13181/images/1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3755ba1c9d1da367c050b38002f06cc883708d5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3bb58cee782216c8e3e927de51b563777f4e425d06323e7aa26004df81a0434 +size 7253 diff --git a/data/2025/2504_13xxx/2504.13181/images/1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg b/data/2025/2504_13xxx/2504.13181/images/1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..615ed5df244dc587c995620e76dc4263dd1b3c86 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44e0a442dac79ebb99d8009c3b4d9e436027c350b7df3eeb186b4a3112256562 +size 5505 diff --git a/data/2025/2504_13xxx/2504.13181/images/20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg b/data/2025/2504_13xxx/2504.13181/images/20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..336dcec1a0acfb6cc3a2c8df444559e36f278ccc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81a9ed0575da1d4cd596e4d267f10b435690093ab833e88726c9bf0310890ca +size 8513 diff --git a/data/2025/2504_13xxx/2504.13181/images/224c02e75a06293b632476410d4685357e8faa8895d55dbfbc83851eee821798.jpg b/data/2025/2504_13xxx/2504.13181/images/224c02e75a06293b632476410d4685357e8faa8895d55dbfbc83851eee821798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcc5383757d9dded5b2f80e7421de6f3d3259084 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/224c02e75a06293b632476410d4685357e8faa8895d55dbfbc83851eee821798.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b226e255672a100f5702076cd6ca462d243ff02d25aa5552253984d689d092 +size 5997 diff --git a/data/2025/2504_13xxx/2504.13181/images/248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg b/data/2025/2504_13xxx/2504.13181/images/248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf65a44d681a0a548c23473e15cf9e8f3a049734 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:def70f4bca99d3af768b95edbc2a0c34eec1a6258de33d9e524605ad175a88e8 +size 5236 diff --git a/data/2025/2504_13xxx/2504.13181/images/26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg b/data/2025/2504_13xxx/2504.13181/images/26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc2bb822f880679b7dd5d7dee2fff45300179ecc --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83e489e430788a145b52193a3af0a81c80f0b5afa5a94b7f9db465fff34739c7 +size 7210 diff --git a/data/2025/2504_13xxx/2504.13181/images/2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg b/data/2025/2504_13xxx/2504.13181/images/2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79b6add7fb8b81eef0e6c67548d834398b67cddb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be7e3f3a0ce3533d2a3be81bfccf87a1091e1e16523cc435a9dac901f11883e6 +size 5372 diff --git a/data/2025/2504_13xxx/2504.13181/images/2e35a5b4cdd10463a1e04015eaa50ce24ce1e8fd08ad30843e4c022ac3a800c4.jpg b/data/2025/2504_13xxx/2504.13181/images/2e35a5b4cdd10463a1e04015eaa50ce24ce1e8fd08ad30843e4c022ac3a800c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e35d350d3b1cee64863413d8a1e032bf5ca4c77c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/2e35a5b4cdd10463a1e04015eaa50ce24ce1e8fd08ad30843e4c022ac3a800c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebb8ce2739fb6c2d387356ace88ff534a882aae3a3a5bda00808e2a59c47e7cb +size 93868 diff --git a/data/2025/2504_13xxx/2504.13181/images/319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg b/data/2025/2504_13xxx/2504.13181/images/319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24559c0689e07cb5ed6b74400d8087f4c8f16bfd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:606f3b75c780774872942a2f9d84266c4b3e9f3c086dfaf43f71edf50e47ac93 +size 15717 diff --git a/data/2025/2504_13xxx/2504.13181/images/389c8fd6f1342ba0acefdf83153292853134b64b77cc3dafeca0867b5135efc2.jpg b/data/2025/2504_13xxx/2504.13181/images/389c8fd6f1342ba0acefdf83153292853134b64b77cc3dafeca0867b5135efc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecbfcea1003a373df0d099c2f674f387830951d7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/389c8fd6f1342ba0acefdf83153292853134b64b77cc3dafeca0867b5135efc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9492cdf9414dce4efe543173f19a4784df9c61825f0575e4d7129d9652fbb12b +size 56730 diff --git a/data/2025/2504_13xxx/2504.13181/images/3b8467cf873fe448328bb00c09bd6f8eaa56dfe7a5132e65f77156c552df6aff.jpg b/data/2025/2504_13xxx/2504.13181/images/3b8467cf873fe448328bb00c09bd6f8eaa56dfe7a5132e65f77156c552df6aff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..434d68da70079082d27d2e3fc05346fbf63bfa4b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/3b8467cf873fe448328bb00c09bd6f8eaa56dfe7a5132e65f77156c552df6aff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ea880cb8cafe3e24b34b3cdd3cc195ba784a3f9689e61863f96c1765c3fec36 +size 63338 diff --git a/data/2025/2504_13xxx/2504.13181/images/3b9c2fc708c6ae91f43db0079c0a53d5a6bfe209c1b5de951dd81e4a3cdb737b.jpg b/data/2025/2504_13xxx/2504.13181/images/3b9c2fc708c6ae91f43db0079c0a53d5a6bfe209c1b5de951dd81e4a3cdb737b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35cdf7414e750a003e5405473e97fe3f92cdd854 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/3b9c2fc708c6ae91f43db0079c0a53d5a6bfe209c1b5de951dd81e4a3cdb737b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbdbd324312f756bbbc6b7ea6dbdbab81f7080955cae8127a5e69a4b4e0ac049 +size 18534 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg b/data/2025/2504_13xxx/2504.13181/images/3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3f62a54f21842dfd54baceb15f8ccf8405d9c93 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:243d2720867462e20a9f9bcba82f7011d22dd62f1fb77c866318b3d82d7d9418 +size 5317 diff --git a/data/2025/2504_13xxx/2504.13181/images/3cde832c80ead650dd1257b3e36e558af544c1df53f380fb5c7963a9230eccf0.jpg b/data/2025/2504_13xxx/2504.13181/images/3cde832c80ead650dd1257b3e36e558af544c1df53f380fb5c7963a9230eccf0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f3eebeccc8a99c6df0d94543ec3da3fefe98bf9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/3cde832c80ead650dd1257b3e36e558af544c1df53f380fb5c7963a9230eccf0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65cc84896f71fea764655ec7bd67c8bc8cdf5800f50880c47710e41f1500d5c3 +size 31112 diff --git a/data/2025/2504_13xxx/2504.13181/images/3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg b/data/2025/2504_13xxx/2504.13181/images/3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe21561d00b86643d933bfd1143bb12ebc4cc97b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01582a4ea72f912d7d92ac2105acea239d3a935ccd8144f7371833b8718d1cb3 +size 9730 diff --git a/data/2025/2504_13xxx/2504.13181/images/4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg 
b/data/2025/2504_13xxx/2504.13181/images/4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7ca5cc574e0f4cbc3870d211338b57000e431dc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49b47bc431738bc7b3090c07ca68fde0b4ca7f277746c309af06e7c95a322122 +size 5254 diff --git a/data/2025/2504_13xxx/2504.13181/images/49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg b/data/2025/2504_13xxx/2504.13181/images/49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ef22612ae735b64dad3a835c00fdfcfee2e6842 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63add5eec53a274dce809a558c5bd3aab3af1d9955e7e511a6e7dd72d52f3d8a +size 5169 diff --git a/data/2025/2504_13xxx/2504.13181/images/5225fa73ec00cf4cdb350eeeb68dc140d3367e44438e9b1899dd91f68f3034d2.jpg b/data/2025/2504_13xxx/2504.13181/images/5225fa73ec00cf4cdb350eeeb68dc140d3367e44438e9b1899dd91f68f3034d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea24c1d5e4a6988458ebba13703c009ed152db48 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/5225fa73ec00cf4cdb350eeeb68dc140d3367e44438e9b1899dd91f68f3034d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e6e3e1679de6bd675488c116dc0dc3da1ac24ec8e4a40ae5028698eb92b7a5 +size 21987 diff --git a/data/2025/2504_13xxx/2504.13181/images/553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg b/data/2025/2504_13xxx/2504.13181/images/553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6a8fcb5a842c91188b721946aa19e65f97d21333 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41170e85e4c423461e43019619c47be355a56695fb7a985d39096a224f813325 +size 5220 diff --git a/data/2025/2504_13xxx/2504.13181/images/5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg b/data/2025/2504_13xxx/2504.13181/images/5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63944d2ad2ef4e825cd9424513c2fd2a9713e63f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0b015b67c8e8b206c056dd5c6f438111c2a563b09929ed8716c43f950763134 +size 16246 diff --git a/data/2025/2504_13xxx/2504.13181/images/5f933611d6bf3d2dff057f4942f99e38f65cccdfe21d7a7d1c12204493408f06.jpg b/data/2025/2504_13xxx/2504.13181/images/5f933611d6bf3d2dff057f4942f99e38f65cccdfe21d7a7d1c12204493408f06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93bdd1c712d006659e227039732ef21f13a73d87 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/5f933611d6bf3d2dff057f4942f99e38f65cccdfe21d7a7d1c12204493408f06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:098e3bb7242fed8a752f37a9a4beed53a61f9f598d59373e274514eb471bbedf +size 11241 diff --git a/data/2025/2504_13xxx/2504.13181/images/60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg b/data/2025/2504_13xxx/2504.13181/images/60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9a9ef230549937d6e101a9694d6fc41a2567852 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35f8522a06491bad1e148009fb516178cb693f8bebe8631fd79d7264090e4a08 +size 11845 diff --git a/data/2025/2504_13xxx/2504.13181/images/60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg b/data/2025/2504_13xxx/2504.13181/images/60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a92eb730ebcafd1bdfb21a9f59dfd2a06d810aa --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bcdb152aab691d7ae2d4f68c4b9bcece31fd67ee7eb6353e10f3ec9842b2481 +size 8272 diff --git a/data/2025/2504_13xxx/2504.13181/images/60f27e61231de16ef8490017adbcf2ac7e92d19c25c123c43f3166e7cb26afdf.jpg b/data/2025/2504_13xxx/2504.13181/images/60f27e61231de16ef8490017adbcf2ac7e92d19c25c123c43f3166e7cb26afdf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..146fb34341f9d66db02bbdd7ce3153d856bc9aec --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/60f27e61231de16ef8490017adbcf2ac7e92d19c25c123c43f3166e7cb26afdf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f48ceee076c5baebdf4738281e63bc7728d93fa785791bae0e86016e86a344e +size 28342 diff --git a/data/2025/2504_13xxx/2504.13181/images/6165b8d321ec714c5d44d432ac6923a8fa593d185ccc1fa1cd8b55f45852f7e8.jpg b/data/2025/2504_13xxx/2504.13181/images/6165b8d321ec714c5d44d432ac6923a8fa593d185ccc1fa1cd8b55f45852f7e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c54653bcc1251f4f1692fda1649b6bf5a320b418 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/6165b8d321ec714c5d44d432ac6923a8fa593d185ccc1fa1cd8b55f45852f7e8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:438a355c8cc0d0c4631a111640385b1cfdb4f462cf98bc43cfc124b246366492 +size 9223 diff --git a/data/2025/2504_13xxx/2504.13181/images/64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg b/data/2025/2504_13xxx/2504.13181/images/64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cefdd6cbb84b4f27c8822542f8850f06ac6ad448 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f0f2b270f2aadaa747dd0398ae0df17b91e9da1eec6efdccca8e9fbdb568222 +size 30976 diff --git a/data/2025/2504_13xxx/2504.13181/images/680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg b/data/2025/2504_13xxx/2504.13181/images/680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..670282289cc614a5a4c40433fe6483bad479fa52 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c31d82e1eef071c719cd4295e9648dbbe24735d502dec1494a04b71da3c8a63f +size 8260 diff --git a/data/2025/2504_13xxx/2504.13181/images/6aaf571ddc34b68dd60e42fa52c459e5fa0be4d384dfe35f17bc16668a48d9aa.jpg b/data/2025/2504_13xxx/2504.13181/images/6aaf571ddc34b68dd60e42fa52c459e5fa0be4d384dfe35f17bc16668a48d9aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1732595d5ce65bedb3f75e85329092d25b5b3cb6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/6aaf571ddc34b68dd60e42fa52c459e5fa0be4d384dfe35f17bc16668a48d9aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ba259e688facbfbfaeb02420f738ef92fb0a09d683ee5865b9865f733a23f3 +size 42418 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg b/data/2025/2504_13xxx/2504.13181/images/6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5764f0f4c042d214331cfc8ab8f8de5d12c00e23 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fe0326844bfb93bd36666855c9f80b73ae7e730c92a72dc2e920921aef4b2c1 +size 19640 diff --git a/data/2025/2504_13xxx/2504.13181/images/7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg b/data/2025/2504_13xxx/2504.13181/images/7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a43405d865f6f49d74b167a9bc5487be162622e8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d75e87c69a69d88b052688ba4deec0cd8bff6d032437564d072797060c9634ea +size 14520 diff --git a/data/2025/2504_13xxx/2504.13181/images/754240b0fdfb6b195dadcbc2f6c7fd2fc9c772c307dec34acb8e1c27fc16616c.jpg b/data/2025/2504_13xxx/2504.13181/images/754240b0fdfb6b195dadcbc2f6c7fd2fc9c772c307dec34acb8e1c27fc16616c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8250bf0807a77a6d26fb6f1560e5afe04263f1e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/754240b0fdfb6b195dadcbc2f6c7fd2fc9c772c307dec34acb8e1c27fc16616c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6bd035759c6f73b5bead4daa19ab8249274f2ec662d32ef6af777ccca0717c6 +size 13130 diff --git a/data/2025/2504_13xxx/2504.13181/images/775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg 
b/data/2025/2504_13xxx/2504.13181/images/775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0081383572e75607f84f14b3e946c185ceed099d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:382280b57b0f617bc014a0e94de480d22a540a99fb3065d883e142955f2a28f0 +size 192952 diff --git a/data/2025/2504_13xxx/2504.13181/images/78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg b/data/2025/2504_13xxx/2504.13181/images/78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85ef15b769f29661c5b08d7b34f79b1a20f152ec --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:400317c00a5e101ba12307a1b5c65dd17690e04af9d5b024b8bb0ecd5eb9590f +size 7520 diff --git a/data/2025/2504_13xxx/2504.13181/images/78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg b/data/2025/2504_13xxx/2504.13181/images/78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2669de7ccd4ef07d3f1b48c0b77b799322f64c20 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77846bd1d55fb8314b4fea35cbfe1b53f3ff99aa280cb25597986235d874d0a4 +size 9939 diff --git a/data/2025/2504_13xxx/2504.13181/images/7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg b/data/2025/2504_13xxx/2504.13181/images/7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..92c80a3a1112f75f4043f32fb4c5b0501cc5cf4c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3cb1a5e8e5a771991e9332c5dff5ebc06979768a28211064ea4e3f914f1b98f +size 17371 diff --git a/data/2025/2504_13xxx/2504.13181/images/8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg b/data/2025/2504_13xxx/2504.13181/images/8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d66a7d4a81cf306a1c384ef3f14c32e02f17f79 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d4da2014545150dbd6fcde8797b6e3eb845e0c12b8d7591a6428fff6342706d +size 7260 diff --git a/data/2025/2504_13xxx/2504.13181/images/82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg b/data/2025/2504_13xxx/2504.13181/images/82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5cc98d5254a2c512584dde9325239f11ac03e9d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f844a5ca8c0dc753638a3c3f839cb0c33fb1795a86c0ff13a83063ffb133b93 +size 5346 diff --git a/data/2025/2504_13xxx/2504.13181/images/84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg b/data/2025/2504_13xxx/2504.13181/images/84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b54ad794c6a59ea4de38e35bd6888d7689db4ce --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:187481506d21d1edd259e0f07d0a456445dc7517da8bf3ac8809cb52b6868ab8 +size 15169 diff --git a/data/2025/2504_13xxx/2504.13181/images/84e3e8bf18b5ba8bbb0729dc244ad4d6daf947726233ee1ff38ac64927363783.jpg b/data/2025/2504_13xxx/2504.13181/images/84e3e8bf18b5ba8bbb0729dc244ad4d6daf947726233ee1ff38ac64927363783.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42a560ad3363a9b83b62480a43a79c504d32f4d0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/84e3e8bf18b5ba8bbb0729dc244ad4d6daf947726233ee1ff38ac64927363783.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae289533db88d5d1c519e3ad0f035cb7e750ba6db36bc5d4f9cb25807853a86c +size 20936 diff --git a/data/2025/2504_13xxx/2504.13181/images/8a7f45e906b49ad1978fa1578eb35d9ff55a0a97c4468b17d2db66e85fd3b4a2.jpg b/data/2025/2504_13xxx/2504.13181/images/8a7f45e906b49ad1978fa1578eb35d9ff55a0a97c4468b17d2db66e85fd3b4a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9548b08956e5eddf54e61bafa274fb897faf10a9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/8a7f45e906b49ad1978fa1578eb35d9ff55a0a97c4468b17d2db66e85fd3b4a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:764ef68dfa52943228b8ee49c2d56e73149b120c75b7b994c88bcb296afd87f6 +size 18977 diff --git a/data/2025/2504_13xxx/2504.13181/images/8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg b/data/2025/2504_13xxx/2504.13181/images/8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b40ddf574bc9b628f5a1157a2a482b77a4e92627 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2897e5aed0768e4ac03a5c2dd4390c305003a3beed3f72c7b32c7d800aa888e7 +size 8172 diff --git a/data/2025/2504_13xxx/2504.13181/images/92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg b/data/2025/2504_13xxx/2504.13181/images/92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f419e2e04ba9aadec930545bf4de2ffd5a8f642 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bed11955c27c4f8830aad1d7dc83bad8d2cea014803e6527401ca012c89fc5fe +size 8457 diff --git a/data/2025/2504_13xxx/2504.13181/images/97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg b/data/2025/2504_13xxx/2504.13181/images/97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74c86b0ba98a5f992610e90ca60e41b8b65747a4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3444a1e99b24fee3046c580dce676b366d74bc0cf28d86a1cc87fd67a0309fec +size 8450 diff --git a/data/2025/2504_13xxx/2504.13181/images/98121d3bed5a35310ba152c9861be31ac69dd8a0d1f018191a8a6603f9f86662.jpg b/data/2025/2504_13xxx/2504.13181/images/98121d3bed5a35310ba152c9861be31ac69dd8a0d1f018191a8a6603f9f86662.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5457fc034611ba0d0da413154b33366cbe191f2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/98121d3bed5a35310ba152c9861be31ac69dd8a0d1f018191a8a6603f9f86662.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58e9310ef364e82dee2156016ebc8284af0909325145a910edbc50557204f258 +size 37538 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg b/data/2025/2504_13xxx/2504.13181/images/9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04c12ebcca69b4c4ee8bafe5b3550752c62142fb --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d28c89acdc27455611d32d440a9300bc93908213bf479ef703303917dcd80c0f +size 7986 diff --git a/data/2025/2504_13xxx/2504.13181/images/9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg b/data/2025/2504_13xxx/2504.13181/images/9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17701cf16f2ffc3cd7231dccf55a0a05ed81ea47 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:064810944f343fc29ad769eb6c8ef5681cba0ac1a98aec59194241d407e5a7de +size 29681 diff --git a/data/2025/2504_13xxx/2504.13181/images/9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg b/data/2025/2504_13xxx/2504.13181/images/9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb0fb3501ff59aa82ea66b0038ddaa68e38fd276 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74cb0a30c9d14bcb2668e9bcf1720f4099e816695656ceabbd936da562dd8a18 +size 5653 diff --git a/data/2025/2504_13xxx/2504.13181/images/a153733586205ebf63f98fe5ca0ce22decc28af55d3f7589bc0d01e7ddca09b3.jpg 
b/data/2025/2504_13xxx/2504.13181/images/a153733586205ebf63f98fe5ca0ce22decc28af55d3f7589bc0d01e7ddca09b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ff54ccd199a926d838d3be4a6f2b8eb27c3c4ef --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a153733586205ebf63f98fe5ca0ce22decc28af55d3f7589bc0d01e7ddca09b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad8f872fdf6878e0f701d52a17c72c79ea465e2f92c795a6f310e5079adb00c3 +size 50078 diff --git a/data/2025/2504_13xxx/2504.13181/images/a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg b/data/2025/2504_13xxx/2504.13181/images/a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb0a0b9c158ed69f93714de3877546bc73bfbf88 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb28c381e2f555096ab0d0b5765ee4a867a7960738d1b960064f320c776c39d5 +size 204012 diff --git a/data/2025/2504_13xxx/2504.13181/images/a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg b/data/2025/2504_13xxx/2504.13181/images/a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce73205e38c7bb60241817961d87a29c39afb119 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f161e573f90af6189d653fd558f7729f19e72f950e176e5e6d1e1c8c3b4a28 +size 8338 diff --git a/data/2025/2504_13xxx/2504.13181/images/a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg b/data/2025/2504_13xxx/2504.13181/images/a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..1e46fcac0a4c5828906287bd36b42282f3b29e80 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acb57944654d184edf1f0cf61fe55d4e6c0e808978e6443cb1f4bf18a6ba6560 +size 53039 diff --git a/data/2025/2504_13xxx/2504.13181/images/a5560ec88cc71a74991ee5cc6e041c018b9aaa38f6f3e9e3aff01f61f4f5de3c.jpg b/data/2025/2504_13xxx/2504.13181/images/a5560ec88cc71a74991ee5cc6e041c018b9aaa38f6f3e9e3aff01f61f4f5de3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfc4895df1828a2de910d25b43ada6fe99f488e8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a5560ec88cc71a74991ee5cc6e041c018b9aaa38f6f3e9e3aff01f61f4f5de3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e016d39ac899d4434c4d022f43d6dd819aaeb20d019a7073dd7d932e044a48c4 +size 46257 diff --git a/data/2025/2504_13xxx/2504.13181/images/a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg b/data/2025/2504_13xxx/2504.13181/images/a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f4192014143a38e5ae361cee95733b6462f6384 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1f104020807b9b0421a8af5014fe5d149859b04c688a1bac0871b62d1d6904a +size 15723 diff --git a/data/2025/2504_13xxx/2504.13181/images/abc1114a11768f47e364fe16d4aef24261b196f0d49fc64674d66412edf9825b.jpg b/data/2025/2504_13xxx/2504.13181/images/abc1114a11768f47e364fe16d4aef24261b196f0d49fc64674d66412edf9825b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74962eedd616ceef42c83b190c32434830acc98b --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/abc1114a11768f47e364fe16d4aef24261b196f0d49fc64674d66412edf9825b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72d5f2e8bc8436c7237837fe9f12529af6bbd25466f772260c60843e2388ec74 +size 49159 diff --git a/data/2025/2504_13xxx/2504.13181/images/b2f874bec500ec2a5eb8600f35c03af98da22cff32721a4b4f68cdca969810fc.jpg b/data/2025/2504_13xxx/2504.13181/images/b2f874bec500ec2a5eb8600f35c03af98da22cff32721a4b4f68cdca969810fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80b7815f27d704708d706f949d813b8b0b4314d8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/b2f874bec500ec2a5eb8600f35c03af98da22cff32721a4b4f68cdca969810fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52c8a2a77cd021aa1060ddff078cf59ab96c707f748637032e416297733e6f5d +size 166266 diff --git a/data/2025/2504_13xxx/2504.13181/images/b442b6245b11605dc45b684c129fb444053fa41af40d35c15539a8fc5181254b.jpg b/data/2025/2504_13xxx/2504.13181/images/b442b6245b11605dc45b684c129fb444053fa41af40d35c15539a8fc5181254b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03dc1475b552a2450c5ff65d53a2409ebfddfd3a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/b442b6245b11605dc45b684c129fb444053fa41af40d35c15539a8fc5181254b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c52d70f4528100d6a20cf0bdaeb65ca118d61f293a89121e3b015c95c94d2f5 +size 23322 diff --git a/data/2025/2504_13xxx/2504.13181/images/b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg b/data/2025/2504_13xxx/2504.13181/images/b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0023731702e309e26eaf62d2fbfcb728732f931e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:81df78a1e2ba88c270a8256279db7ed4106a7c5a299fb01435ddf4cf0ec095c9 +size 9376 diff --git a/data/2025/2504_13xxx/2504.13181/images/bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg b/data/2025/2504_13xxx/2504.13181/images/bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2162943f5972a056ba3adc058eac030a334b9dd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60922f84221191c94f0b8b41407f6dcb812f5dd17ca1e71f5e62ac09f3b42631 +size 41509 diff --git a/data/2025/2504_13xxx/2504.13181/images/c293da833f1f864b33c52c0b9a7471a4ecaa95fb54573885bda040013500ef54.jpg b/data/2025/2504_13xxx/2504.13181/images/c293da833f1f864b33c52c0b9a7471a4ecaa95fb54573885bda040013500ef54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8468f601c6058ae5547f421da9d08d103ff64d7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/c293da833f1f864b33c52c0b9a7471a4ecaa95fb54573885bda040013500ef54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51dff208240607779250db06a098edffb2362961d3dbca3fa968045349a16115 +size 40103 diff --git a/data/2025/2504_13xxx/2504.13181/images/c316ddc2973a2c132e703a17948017da60b2c81884dbd22cf8abbd5cd3d8dd51.jpg b/data/2025/2504_13xxx/2504.13181/images/c316ddc2973a2c132e703a17948017da60b2c81884dbd22cf8abbd5cd3d8dd51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de6c93ee4ad27597e7697d4d8788a694e8b9ff37 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/c316ddc2973a2c132e703a17948017da60b2c81884dbd22cf8abbd5cd3d8dd51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7f229ab845202351b7f86ba0b8a351adc457f56fa5c5c7023e6ab3274b7a392 +size 86669 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/c4c7c849b0dc9295c39690b836481b60a614a3ea89eddad3e9fbbbcb72ed2aaa.jpg b/data/2025/2504_13xxx/2504.13181/images/c4c7c849b0dc9295c39690b836481b60a614a3ea89eddad3e9fbbbcb72ed2aaa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83d502ed6d4883f28ffa2dbd0a4ae94f706e10ce --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/c4c7c849b0dc9295c39690b836481b60a614a3ea89eddad3e9fbbbcb72ed2aaa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b49797c361dbfb7de88428e5d8ed754c1b71695c78bc79b03482509c7f80ac13 +size 99574 diff --git a/data/2025/2504_13xxx/2504.13181/images/c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg b/data/2025/2504_13xxx/2504.13181/images/c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27baf308be2874f9c592ec8540b79ede004d9c8a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e7e55164e7d2746f2fa8b1dff66260d531b91b9c8eef2f0397b4010027b1f92 +size 9469 diff --git a/data/2025/2504_13xxx/2504.13181/images/c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg b/data/2025/2504_13xxx/2504.13181/images/c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8290d9e08d0934cc8fcfab17dec963fc958aa66d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13d4d01a27e4f64ed4566cd87a89ba7c578d9596034cc2a938b1a1e77c56edd7 +size 8265 diff --git a/data/2025/2504_13xxx/2504.13181/images/ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg 
b/data/2025/2504_13xxx/2504.13181/images/ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d06748c0a810aa1cd5eb12d93e354241948b9d7 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98cf96f928a6e0a2ffb4211656f74778a206889da6d6467e7ea5ee09e9b17eb8 +size 7063 diff --git a/data/2025/2504_13xxx/2504.13181/images/ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg b/data/2025/2504_13xxx/2504.13181/images/ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e44a3feb02a0a85d9c3ce3f88b6c17691f5a68b5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e45ab641ae2ab2ab9857558ead70bd7103266bf4d9ca2b497da8f01cf948024 +size 39046 diff --git a/data/2025/2504_13xxx/2504.13181/images/cfe4b15691ebaac0d28f672e8a216e8ab712efe6f793dc7edb7c6cbec161fe75.jpg b/data/2025/2504_13xxx/2504.13181/images/cfe4b15691ebaac0d28f672e8a216e8ab712efe6f793dc7edb7c6cbec161fe75.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50ea0ff0c2cceeed59285f1dee69e4e06f873171 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/cfe4b15691ebaac0d28f672e8a216e8ab712efe6f793dc7edb7c6cbec161fe75.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4f73e5803d07b87c49528059065b804235a2b03c73feab2f641b3c54f61fe8 +size 74958 diff --git a/data/2025/2504_13xxx/2504.13181/images/d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg b/data/2025/2504_13xxx/2504.13181/images/d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6f6b6105bc0f2e8b02778e81db848f3dbc6acbfa --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fce67bfe0c6f44a41042428da26a575f813d384da70929092454c5f2191d2b9d +size 25001 diff --git a/data/2025/2504_13xxx/2504.13181/images/d146e5ba36590a72990779f2d2fff0d2f01b0733d571b364a0116dbed224b453.jpg b/data/2025/2504_13xxx/2504.13181/images/d146e5ba36590a72990779f2d2fff0d2f01b0733d571b364a0116dbed224b453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0377811a22eceafd77ab324f4c6a780c7af34c3a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/d146e5ba36590a72990779f2d2fff0d2f01b0733d571b364a0116dbed224b453.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5358023ac1b7e886b8ece8e37fe685de3aef4d7283821df9404fa0416eaf564f +size 72601 diff --git a/data/2025/2504_13xxx/2504.13181/images/d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg b/data/2025/2504_13xxx/2504.13181/images/d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4e2812eafe0d0a55cb0ec30d5ec39355ccf410f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee5eb98d49eb61332c934da1bed87db60a98406b6b4621901261265026d34420 +size 5059 diff --git a/data/2025/2504_13xxx/2504.13181/images/d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg b/data/2025/2504_13xxx/2504.13181/images/d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23ba0028cba5cc18008855bcac85130c6a0c9be7 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce09b4ce3d6c8ff78d6e9c8ca09d74604b1959effdfbe95ead5f2ada0fd00aba +size 51836 diff --git a/data/2025/2504_13xxx/2504.13181/images/d4c9c0207cfe48c928432e95dc406954c073a838ea4989599c7687b773660fe9.jpg b/data/2025/2504_13xxx/2504.13181/images/d4c9c0207cfe48c928432e95dc406954c073a838ea4989599c7687b773660fe9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d41b620bda20d0d6955348ff88b22afdf50cd527 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/d4c9c0207cfe48c928432e95dc406954c073a838ea4989599c7687b773660fe9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b66bd7801e42240d29a72733af37c12e9be9ec3cb0eee25770f25d6afc1a06bb +size 138071 diff --git a/data/2025/2504_13xxx/2504.13181/images/daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg b/data/2025/2504_13xxx/2504.13181/images/daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..918111288406484dc4e9d29c66493046d03cc42a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d73ad600f54f486ecdc2b697a3d81a0675d856c5f243c35ef90c817b9c76c7c9 +size 9690 diff --git a/data/2025/2504_13xxx/2504.13181/images/dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg b/data/2025/2504_13xxx/2504.13181/images/dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c37a37ab751817967b33c19f10deebe5fee6f70c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:98b753081d340e7b6b070e65635d944bedce44da074c07c363f8b01556e1711f +size 8593 diff --git a/data/2025/2504_13xxx/2504.13181/images/ddb05a6977bad6a63462785d89e782e38ab42d14be63d629f21a1828f6517a27.jpg b/data/2025/2504_13xxx/2504.13181/images/ddb05a6977bad6a63462785d89e782e38ab42d14be63d629f21a1828f6517a27.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa963a65f92fc27631c212d4d33af41a0bd83522 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/ddb05a6977bad6a63462785d89e782e38ab42d14be63d629f21a1828f6517a27.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8afa97d7b31993d2b45f86837d28920059a8cae541ac6766f96422c4b02194ba +size 42129 diff --git a/data/2025/2504_13xxx/2504.13181/images/de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg b/data/2025/2504_13xxx/2504.13181/images/de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24813f7cb630c2e51963e620e0cc1b7332e32973 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1434fd970d79bba1c4a49be5747a9fbf73346049a7f6043df720b9059452624 +size 13085 diff --git a/data/2025/2504_13xxx/2504.13181/images/e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg b/data/2025/2504_13xxx/2504.13181/images/e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fb0113c17fed35409f15bc6de4173bbfc2caa1c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca1f38d2e1491757c70fab4a3e6307c4ab1a9e4437d56a112445fac77ff54440 +size 17816 diff --git 
a/data/2025/2504_13xxx/2504.13181/images/e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg b/data/2025/2504_13xxx/2504.13181/images/e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..066c11f04be80403c298e668a55d1dc5691166f4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5c9e84489083d9389a89535438dc46cc1a9c718b564a1a10dcc32e65c42badf +size 60842 diff --git a/data/2025/2504_13xxx/2504.13181/images/ef15fc2bd9455fb292713d568e39c9a6348ca77f140ee0f3585a04410a9ad1b8.jpg b/data/2025/2504_13xxx/2504.13181/images/ef15fc2bd9455fb292713d568e39c9a6348ca77f140ee0f3585a04410a9ad1b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1079594b7ab9214a3a49c784c23408bd74c87f1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/ef15fc2bd9455fb292713d568e39c9a6348ca77f140ee0f3585a04410a9ad1b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3a6bc9161a308647b9191f3f74bcce73978ba6184959316f0c454dc42a2c5d7 +size 15606 diff --git a/data/2025/2504_13xxx/2504.13181/images/f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg b/data/2025/2504_13xxx/2504.13181/images/f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a8a8035871516bbc3aa3ab4421ece85f8d7fd88 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc89e636a18ef3cd355658e72817c8555fda2c6b505bb47a1e9334d4b8cbfe27 +size 8204 diff --git a/data/2025/2504_13xxx/2504.13181/images/f52605fdf5beeb5ee0955806cc4d9e34acda98833d5b83e553f7a76cb644ecf0.jpg 
b/data/2025/2504_13xxx/2504.13181/images/f52605fdf5beeb5ee0955806cc4d9e34acda98833d5b83e553f7a76cb644ecf0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90dbb98059765b63304de4c2e15c32e791850901 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f52605fdf5beeb5ee0955806cc4d9e34acda98833d5b83e553f7a76cb644ecf0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b44e01c283f33e0ee26a04ab598886f01bdaed556176520374f305b8795545d +size 6655 diff --git a/data/2025/2504_13xxx/2504.13181/images/f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg b/data/2025/2504_13xxx/2504.13181/images/f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd8faa89031d3ae0af135222401df3cb2cb4ea53 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e162eaae2e22eb80692db2eb0984956dacffede98e7c8457d55860b0042a3ac2 +size 8028 diff --git a/data/2025/2504_13xxx/2504.13181/images/f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg b/data/2025/2504_13xxx/2504.13181/images/f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89041072e40fef33dc8f393d2fd38aa169e594c9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41a371717b1e90277642f714126f92f36eef995f7a824c1e73b29f788a812c27 +size 106842 diff --git a/data/2025/2504_13xxx/2504.13181/images/f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg b/data/2025/2504_13xxx/2504.13181/images/f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..721a8fcbc549bad92e66df9a32c3f26a89aecf3d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80745397dd6a4eabf129bae732dbf965eb2d23fdb60799f8559110e53553731a +size 17805 diff --git a/data/2025/2504_13xxx/2504.13181/images/f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg b/data/2025/2504_13xxx/2504.13181/images/f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09d6f94dbc1e31965e4f815cf5f6334c7f63c2c9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0db9f22b96a6600cb5f7a56e0885630f61badc333c969ab3225326d592de869 +size 5731 diff --git a/data/2025/2504_13xxx/2504.13181/images/f7ec4c21f4db6833171c5782c1dfa0e8273b68efcd9efc14890eb5c453a133c9.jpg b/data/2025/2504_13xxx/2504.13181/images/f7ec4c21f4db6833171c5782c1dfa0e8273b68efcd9efc14890eb5c453a133c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5b51a255cbcf909bfac67e713dd7ba6b8253d84 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/f7ec4c21f4db6833171c5782c1dfa0e8273b68efcd9efc14890eb5c453a133c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:386fcb014bad3842cce6afbead85e5f4aabb0363bc21ad8268a202a4a855885c +size 190641 diff --git a/data/2025/2504_13xxx/2504.13181/images/fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg b/data/2025/2504_13xxx/2504.13181/images/fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c5bb30b688b6a6884f6a819d78343bda086c90f --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13181/images/fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:516a9f6db7b902ac55f8e7068fb62b9562d719dad9e92079bed5dbfad9ae427b +size 11662 diff --git a/data/2025/2504_13xxx/2504.13181/images/fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg b/data/2025/2504_13xxx/2504.13181/images/fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..407691cff34d4607f7f4cc83adb01629fe10ce77 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/images/fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7a97949503446ff3db661bcbbc77bdd1a4e901eb9d1a5f9b2efc669d2096fb2 +size 5605 diff --git a/data/2025/2504_13xxx/2504.13181/layout.json b/data/2025/2504_13xxx/2504.13181/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ccfc780abe4558aa22707b8b6cd8888f7169742b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13181/layout.json @@ -0,0 +1,32301 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 85, + 78, + 509, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 78, + 509, + 119 + ], + "spans": [ + { + "bbox": [ + 85, + 78, + 509, + 119 + ], + "type": "text", + "content": "Perception Encoder: The best visual embeddings are not at the output of the network" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "spans": [ + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": "Daniel Bolya" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": 
", Po-Yao Huang" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Peize Sun" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Jang Hyun Cho" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1,2,\\ast,\\dagger}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Andrea Madotto" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Chen Wei" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Tengyu Ma" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Jiale Zhi" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Jathushan Rajasegaran" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Hanoona Rasheed" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{3,\\dagger}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Junke Wang" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{4,\\dagger}" + }, + { + "bbox": 
[ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Marco Monteiro" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Hu Xu" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Shiyu Dong" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Nikhila Ravi" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Daniel Li" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Piotr Dólár" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "text", + "content": ", Christoph Feichtenhofer" + }, + { + "bbox": [ + 84, + 122, + 526, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "spans": [ + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "content": "Meta FAIR, " + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "content": "UT Austin, " + }, + { + "bbox": [ + 85, + 164, 
+ 428, + 190 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "content": "MBZUAI, " + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "content": "Fudan University, " + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 85, + 164, + 428, + 190 + ], + "type": "text", + "content": "Meta Reality Labs *Joint first author, †Work done during internships at Meta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 203, + 526, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 203, + 526, + 384 + ], + "spans": [ + { + "bbox": [ + 83, + 203, + 526, + 384 + ], + "type": "text", + "content": "We introduce Perception Encoder (PE), a state-of-the-art vision encoder for image and video understanding trained via simple vision-language learning. Traditionally, vision encoders have relied on a variety of pretraining objectives, each tailored to specific downstream tasks such as classification, captioning, or localization. Surprisingly, after scaling our carefully tuned image pretraining recipe and refining with our robust video data engine, we find that contrastive vision-language training alone can produce strong, general embeddings for all of these downstream tasks. There is only one caveat: these embeddings are hidden within the intermediate layers of the network. To draw them out, we introduce two alignment methods: language alignment for multimodal language modeling, and spatial alignment for dense prediction. 
Together, our PE family of models achieves best-in-class results on a wide variety of tasks, including (1) zero-shot image and video classification and retrieval, simultaneously obtaining 86.6 average zero-shot ImageNet robustness and 76.9 zero-shot Kinetics-400 video classification; (2) document, image, and video Q&A, enabling 94.6 DocVQA, 80.9 InfographicVQA, and 82.7 PerceptionTest with an 8B LLM; and (3) spatial tasks such as detection, tracking, and depth estimation, setting a new COCO state-of-the-art of 66.0 box mAP. To foster further research, we release our models, code, and novel dataset of synthetically and human-annotated videos." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 398, + 350, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 398, + 350, + 409 + ], + "spans": [ + { + "bbox": [ + 85, + 398, + 350, + 409 + ], + "type": "text", + "content": "Code: https://github.com/facebookresearch/perception_models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 86, + 411, + 289, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 411, + 289, + 421 + ], + "spans": [ + { + "bbox": [ + 86, + 411, + 289, + 421 + ], + "type": "text", + "content": "Dataset: https://ai.meta.com/datasets/pe-video/" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 480, + 411, + 526, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 480, + 411, + 526, + 422 + ], + "spans": [ + { + "bbox": [ + 480, + 411, + 526, + 422 + ], + "type": "text", + "content": "Meta" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 453, + 166, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 453, + 166, + 467 + ], + "spans": [ + { + "bbox": [ + 67, + 453, + 166, + 467 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 476, + 543, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, 
+ 476, + 543, + 525 + ], + "spans": [ + { + "bbox": [ + 66, + 476, + 543, + 525 + ], + "type": "text", + "content": "For the last decade in computer vision, pretrained vision encoders have been the core building block for most applications requiring perception. From million-scale ImageNet [26] pretrained convolutional networks [42, 61, 81, 124, 131] to billion-scale web-pretrained transformers [19, 24, 29, 33, 54, 102, 130, 152, 158], the dominant strategy in vision has consistently been to adapt large-scale pretrained encoders to downstream tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 66, + 530, + 543, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 530, + 543, + 615 + ], + "spans": [ + { + "bbox": [ + 66, + 530, + 543, + 615 + ], + "type": "text", + "content": "There are many pretraining objectives today, each with distinct characteristics and each yielding representations better suited for specific tasks: vision-language contrastive losses [106, 160] learn a global vision and language embedding well-suited for zero-shot classification and retrieval as well as provide vision-language alignment for open-world [69, 94] and generative tasks [108, 114]; captioning losses [37, 137] learn to predict image descriptions using a language decoder, which transfers well to downstream multimodal language model (MLLM) tasks; and spatially self-supervised losses [44, 98] learn dense spatial correspondences without language supervision, making them useful for tasks requiring precise localization like object detection." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 66, + 620, + 543, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 620, + 543, + 669 + ], + "spans": [ + { + "bbox": [ + 66, + 620, + 543, + 669 + ], + "type": "text", + "content": "Many works are now attempting to combine two or more of these techniques in different ways [19, 34, 35, 37, 45, 90, 110, 158]. 
While many have been successful, the complexity of these strategies grows exponentially with number of use cases, which can make scaling difficult. There has not yet been shown a single, simple, and easily scalable pretraining technique that can learn state-of-the-art features for all downstream tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 673, + 544, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 673, + 544, + 723 + ], + "spans": [ + { + "bbox": [ + 66, + 673, + 544, + 723 + ], + "type": "text", + "content": "In this work we discover that global vision-language contrastive learning alone can be one such approach. After building a state-of-the-art contrastive model for image and video, we found a surprising result: inside the model were specific features aligned to OCR, VQA, grounding, detection, depth estimation, and tracking. Compared to the state-of-the-art models with captioning [37] and spatially self-supervised [98] pretraining, our" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.13181v2 [cs.CV] 28 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 62, + 541, + 166 + ], + "blocks": [ + { + "bbox": [ + 69, + 62, + 541, + 166 + ], + "lines": [ + { + "bbox": [ + 69, + 62, + 541, + 166 + ], + "spans": [ + { + "bbox": [ + 69, + 62, + 541, + 166 + ], + 
"type": "image", + "image_path": "0a3b7c226af8ac168ff6731a42e3b174d5240bddd13e3945533cd8ad5d5e2282.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 170, + 541, + 226 + ], + "lines": [ + { + "bbox": [ + 67, + 170, + 541, + 226 + ], + "spans": [ + { + "bbox": [ + 67, + 170, + 541, + 226 + ], + "type": "text", + "content": "Figure 1 Perception Encoder (PE) is a family of large-scale vision encoder models with state-of-the-art performance on a large variety of vision tasks. By using a robust contrastive pretraining recipe and finetuning on synthetically aligned videos, PE not only outperforms all existing models on classification and retrieval (§2), but it also internally produces strong, general features that scale for downstream tasks (§3). PE unlocks the ability for large-scale contrastive pretraining to transfer to downstream tasks with alignment tuning to capitalize on those general features (§4, §5)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 246, + 541, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 246, + 541, + 307 + ], + "spans": [ + { + "bbox": [ + 66, + 246, + 541, + 307 + ], + "type": "text", + "content": "contrastive encoder has specific layers that, when used as frozen features, matches or exceeds the performance of the other two pretraining techniques on tasks they should be the best at. The only problem is—these features exist at different layers for each task. By exploiting this phenomenon with alignment tuning, we show it is possible to align these features to the end of the network in order to create state-of-the-art encoders for downstream MLLM and spatial tasks—all following the same easily scalable contrastive pretraining." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "spans": [ + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "We begin by building " + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": " (Fig. 1, left), a large-scale contrastively pretrained model with state-of-the-art zero-shot performance on both images and video (" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\S 2" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "). To accomplish this, we first focus on developing a strong image-only contrastive pretraining recipe to extract general knowledge from billion-scale image-text data. Keeping the data and training FLOPs fixed, this recipe significantly improves upon vanilla CLIP in both absolute performance and robustness (" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\S 2.1" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "). We then use the resulting model as a frame-based encoder to develop a video data engine for generating well-aligned video captions. Finetuning on this synthetic video-text data substantially improves performance on both image and video classification and retrieval tasks (" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\S 2.2" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "). 
Motivated by this success, we release a large portion of the data used to train the engine: PE Video Dataset (PVD), consisting of 1M diverse videos with 120K human-refined annotations (" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\S 2.3" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "). Finally, we scale our robust image pretraining and well-aligned video finetuning strategy to 2B parameters to produce " + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "inline_equation", + "content": "\\S 2.4" + }, + { + "bbox": [ + 66, + 312, + 541, + 456 + ], + "type": "text", + "content": "), a single unified encoder that outperforms SigLIP2 [138] on zero-shot image tasks and InternVideo2 [146] on most zero-shot video tasks. We further transfer this power to smaller model scales through distillation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 461, + 541, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 461, + 541, + 558 + ], + "spans": [ + { + "bbox": [ + 66, + 461, + 541, + 558 + ], + "type": "text", + "content": "With the strongest image and video recognition model in hand, we shift our focus to downstream tasks. Remarkably, despite being pretrained with CLIP loss, we find that the intermediate layers of " + }, + { + "bbox": [ + 66, + 461, + 541, + 558 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 461, + 541, + 558 + ], + "type": "text", + "content": " can rival AIMv2-3B [37] on language tasks and DINOv2-g [98] on spatial tasks, both of which among the strongest pretrained models in their respective domains. 
Upon investigation, we attribute this capability to our robust image pretraining strategy, which appears to have unlocked the potential of contrastive pretraining to scale effectively for downstream tasks (§3). However, a challenge remains: the model does not naturally output these features, keeping them hidden internally. To address this, we introduce two alignment tuning methods (Fig. 1, right) to extract these strong, general features." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 563, + 541, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 563, + 541, + 612 + ], + "spans": [ + { + "bbox": [ + 66, + 563, + 541, + 612 + ], + "type": "text", + "content": "First, in §4, we investigate the most effective technique to align features to the end of the network by adapting to a large language model. This language alignment enables us to construct " + }, + { + "bbox": [ + 66, + 563, + 541, + 612 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 563, + 541, + 612 + ], + "type": "text", + "content": ", which individually outperforms all other popular vision encoders for MLLM tasks. Moreover, when paired with our Perception Language Model (PLM) [21], the combination rivals the latest state-of-the-art MLLMs, like InternVL3 [168]" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 616, + 541, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 616, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 66, + 616, + 541, + 689 + ], + "type": "text", + "content": "Second, in §5, we identify a dichotomy in the layers optimal for spatial tasks. 
By visualizing the features and pinpointing the explicit reason for this dichotomy, we develop a straightforward spatial alignment approach: distilling from the model's own frozen features to achieve most of the alignment, complemented by a novel use of SAM 2 [111] for spatial correspondence distillation to refine the process. The resulting " + }, + { + "bbox": [ + 66, + 616, + 541, + 689 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 616, + 541, + 689 + ], + "type": "text", + "content": " not only outperforms other popular models in depth estimation, tracking, and semantic segmentation, but also sets a new absolute state-of-the-art on COCO [76] detection with a much simpler decoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 694, + 541, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 694, + 541, + 718 + ], + "spans": [ + { + "bbox": [ + 66, + 694, + 541, + 718 + ], + "type": "text", + "content": "With this family of checkpoints, Perception Encoder unlocks the potential to scale one simple pretraining method to solve many downstream vision tasks. We are releasing our models, code, and PE Video Dataset." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 249, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 249, + 79 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 249, + 79 + ], + "type": "text", + "content": "2 Perception Encoder: Core" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 85, + 543, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 85, + 543, + 121 + ], + "spans": [ + { + "bbox": [ + 66, + 85, + 543, + 121 + ], + "type": "text", + "content": "To build Perception Encoder (PE), we start by training a large-scale, robust, and highly performant vision-language contrastive model for image and video. We have two objectives: first, to enhance the scalability and data efficiency of contrastive training; and second, to create a unified model effective on both image and video." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 127, + 543, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 127, + 543, + 213 + ], + "spans": [ + { + "bbox": [ + 66, + 127, + 543, + 213 + ], + "type": "text", + "content": "These goals are somewhat conflicting: image-text data is plentiful and training on images is efficient, but video-text data is scarce and video training is expensive. Thus, we decouple image and video training into two stages. We first develop a strong image pretraining recipe (§2.1) with several regularization techniques to create a robust starting point. 
Then we use the resulting image model as a frame encoder to develop a video data engine (§2.2) supported by our novel human-refined video-text dataset (§2.3) to generate aligned captions for video clips. Finally, we finetune the image encoder on the resulting aligned video data (§2.4). Using our data engine design, this short finetuning step substantially improves both image and video performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 221, + 224, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 221, + 224, + 234 + ], + "spans": [ + { + "bbox": [ + 67, + 221, + 224, + 234 + ], + "type": "text", + "content": "2.1 Robust Image Pretraining" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 240, + 544, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 240, + 544, + 312 + ], + "spans": [ + { + "bbox": [ + 66, + 240, + 544, + 312 + ], + "type": "text", + "content": "In the first stage of pretraining, we want to learn as much visual information as possible from a large set of image-text data. Notably, a unique quirk of contrastive training is the loss for a given sample depends on the other samples in the batch. Because each batch is different, there is potential to learn new information every time an example is sampled, even if that sample has been seen before. Thus, we find contrastive learning to benefit from a long training schedule. To exploit this, we design our pretraining recipe with high regularization, stability, and training efficiency in mind." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 321, + 269, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 269, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 269, + 573 + ], + "type": "text", + "content": "Setup. (Fig. 2.1) We track our changes on a vanilla CLIP model using an OpenCLIP [51] ViT-L/14 model at 224 resolution as a baseline. 
We keep the training budget fixed to around 1T GFLOPs (i.e., a ZFLOP), and train on a fixed 2.3B image-text dataset curated using the MetaCLIP [152] text-only curation pipeline. For the baseline, we use a global batch size of " + }, + { + "bbox": [ + 67, + 321, + 269, + 573 + ], + "type": "inline_equation", + "content": "32\\mathrm{K}" + }, + { + "bbox": [ + 67, + 321, + 269, + 573 + ], + "type": "text", + "content": ", class token, AdamW [83], and train for 12B samples seen. To assess the generality of the information learned during pretraining, we report not only zero-shot ImageNet val [26] results but also the average performance across a range of robustness metrics, including ImageNet val [26], ImageNet v2 [112], ObjectNet [4], ImageNet Adversarial [47], ImageNet Rendition [46], and ImageNet Sketch [143]. As observed with other pure CLIP models [33, 106, 152], the average robustness metric performance of this vanilla recipe is much lower than ImageNet val alone." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 288, + 312, + 547, + 463 + ], + "blocks": [ + { + "bbox": [ + 288, + 312, + 547, + 463 + ], + "lines": [ + { + "bbox": [ + 288, + 312, + 547, + 463 + ], + "spans": [ + { + "bbox": [ + 288, + 312, + 547, + 463 + ], + "type": "image", + "image_path": "ceb3bd96393f5aff984c5308f6100242887bd14fdb3b1fb4bbfff4196daf8815.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 280, + 473, + 542, + 573 + ], + "lines": [ + { + "bbox": [ + 280, + 473, + 542, + 573 + ], + "spans": [ + { + "bbox": [ + 280, + 473, + 542, + 573 + ], + "type": "text", + "content": "Figure 2 Robust Image Pretraining. We tune our pretraining recipe (§2.1) to maximize performance on a fixed set of data, starting with an OpenCLIP [51] ViT-L/14 model. We report cumulative zero-shot classification results for each modification. 
The inner bars show robustness evaluation, calculated as the average of 6 robustness benchmarks [4, 26, 46, 47, 112, 143], and the outer bars show ImageNet val [26] alone. Several changes significantly improve robustness, indicating that ImageNet val scales more with data, while robustness can scale with refined training techniques." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 582, + 542, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 582, + 542, + 631 + ], + "spans": [ + { + "bbox": [ + 66, + 582, + 542, + 631 + ], + "type": "text", + "content": "Progressive Resolution. (Fig. 2.2) To enable longer training, we first improve training efficiency. As shown in many works [70, 71, 79, 131, 136], vision encoders work well with a progressively increasing resolution schedule. Thus, we halve the training FLOPs while maintaining performance by evenly splitting the baseline 12B-sample run into 98, 154, and 224 resolution stages, with 4B samples per stage." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "spans": [ + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "content": "Increasing Batch Size. (Fig. 2.3) We use the extra budget to double the batch size from " + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "inline_equation", + "content": "32\\mathrm{K}" + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "inline_equation", + "content": "64\\mathrm{K}" + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "content": ", increasing the total samples seen from 12B to 24B. Larger batch size means a higher likelihood for there to be a non-trivially novel pair of samples, i.e., hard negatives. 
This is akin to increasing the \"task difficulty\" of CLIP and improves ImageNet val by " + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "inline_equation", + "content": "+0.6\\%" + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "content": " and robustness by double of that, " + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "inline_equation", + "content": "+1.1\\%" + }, + { + "bbox": [ + 66, + 639, + 542, + 688 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 66, + 696, + 543, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 696, + 543, + 723 + ], + "spans": [ + { + "bbox": [ + 66, + 696, + 543, + 723 + ], + "type": "text", + "content": "LAMB Optimizer. (Fig. 2.4) We switch from AdamW to LAMB [156], which is known to stabilize large batch training. More importantly, LAMB allows us to train stably with a higher learning rate of " + }, + { + "bbox": [ + 66, + 696, + 543, + 723 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-3}" + }, + { + "bbox": [ + 66, + 696, + 543, + 723 + ], + "type": "text", + "content": " compared" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "text", + "content": "to the original " + }, + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 67, + 64, + 543, + 
88 + ], + "type": "text", + "content": ". We observe that starting with a high learning rate is important to allow the model to adapt to different resolutions. These factors combine for " + }, + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "inline_equation", + "content": "+0.4\\%" + }, + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "text", + "content": " on ImageNet val and " + }, + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "inline_equation", + "content": "+0.7\\%" + }, + { + "bbox": [ + 67, + 64, + 543, + 88 + ], + "type": "text", + "content": " on robustness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "spans": [ + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "text", + "content": "Increasing Final Resolution. (Fig. 2.5) A classic finding is that parameters and resolution should be scaled together [36, 131]. Thus, we add a fourth 336 resolution stage at the end of training. To keep the training FLOPs the same, we adjust the training schedule to 10B samples at 98 resolution, 8B at 154, 4B at 224, and 2B at 336. While ImageNet val only increases by " + }, + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "inline_equation", + "content": "+0.5\\%" + }, + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "text", + "content": ", robustness improves threefold, rising by " + }, + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "inline_equation", + "content": "+1.4\\%" + }, + { + "bbox": [ + 67, + 97, + 543, + 146 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "spans": [ + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "text", + "content": "RoPE. (Fig. 
2.6) We add 2D RoPE [127] to each attention layer to improve extrapolation, keeping the original position embedding. 2D RoPE only improves ImageNet val by " + }, + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "inline_equation", + "content": "+0.3\\%" + }, + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "text", + "content": " but enhances robustness by " + }, + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "inline_equation", + "content": "+0.9\\%" + }, + { + "bbox": [ + 67, + 155, + 543, + 179 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "spans": [ + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "text", + "content": "Attention Pooling. (Fig. 2.7) We follow [160] in constructing the CLIP embedding using an attention probing transformer block. Surprisingly, we found keeping the class token as an input to this block is important for small model performance. Together, this improves ImageNet val by " + }, + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "inline_equation", + "content": "+0.3\\%" + }, + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "text", + "content": " and robustness by " + }, + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "inline_equation", + "content": "+0.9\\%" + }, + { + "bbox": [ + 67, + 188, + 543, + 225 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "spans": [ + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "text", + "content": "Tuned Data Augmentation. (Fig. 2.8) Despite training on billions of samples, we find data augmentation still important—especially for transfer to unlikely scenarios like in ObjectNet [4]. 
We add heavy random cropping, brightness/saturation jitter, and horizontal flip. Random cropping encourages using the entire caption, as not everything is in frame. Jitter helps low-light settings and documents. Horizontal flip improves natural images and does not hurt OCR (see §2.5). These improve robustness by " + }, + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "inline_equation", + "content": "+0.7\\%" + }, + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "text", + "content": ", notably, ObjectNet by " + }, + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "inline_equation", + "content": "+2.4\\%" + }, + { + "bbox": [ + 67, + 234, + 543, + 295 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 303, + 543, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 303, + 543, + 364 + ], + "spans": [ + { + "bbox": [ + 67, + 303, + 543, + 364 + ], + "type": "text", + "content": "Mask Regularization. (Fig. 2.9) As regularization, we want the model to produce the same features if some patches are not visible. However, passing the CLIP gradients through masked images may negatively alter behavior on unmasked images. Thus, we convert MaskFeat [147] into a regularization loss by duplicating and masking 1/16th of the batch. At the output, the masked tokens are aligned to their unmasked counterparts by maximizing cosine similarity. Care is taken to ensure that the CLIP and masked gradients are disjoint." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 372, + 543, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 372, + 543, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 372, + 543, + 433 + ], + "type": "text", + "content": "Scaling Behavior. (Figs. 3 and 4) In Fig. 3, we show the performance of our recipe (Fig. 2.9) vs. the original CLIP recipe (Fig. 2.1) across S/14, B/14, and L/14 models. 
For each benchmark, our recipe scales around the same rate or better than the original CLIP recipe. On some difficult datasets like ObjectNet [4] and ImageNet Adversarial [47], our recipe shows distinctly better scaling. This indicates that the improvements in performance were not at the cost of scalability, meaning we can further benefit from scaling the model size." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 72, + 442, + 148, + 504 + ], + "blocks": [ + { + "bbox": [ + 72, + 442, + 148, + 504 + ], + "lines": [ + { + "bbox": [ + 72, + 442, + 148, + 504 + ], + "spans": [ + { + "bbox": [ + 72, + 442, + 148, + 504 + ], + "type": "image", + "image_path": "248bf47810a642cd188b3f5505507120452fc477d1e98c80ff4049a8ab332782.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 510, + 543, + 533 + ], + "lines": [ + { + "bbox": [ + 67, + 510, + 543, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 510, + 543, + 533 + ], + "type": "text", + "content": "Figure 3 Scaling Behavior (Model Size). Results before and after our recipe changes (Fig. 2) for S/14, B/14, and L/14 models. Our recipe improves scaling for difficult metrics like ObjectNet [4] and ImageNet Adeversarial [47]." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 150, + 442, + 225, + 504 + ], + "blocks": [ + { + "bbox": [ + 150, + 442, + 225, + 504 + ], + "lines": [ + { + "bbox": [ + 150, + 442, + 225, + 504 + ], + "spans": [ + { + "bbox": [ + 150, + 442, + 225, + 504 + ], + "type": "image", + "image_path": "d1949b98e010fabdd7fb456ec09a255f8c2887f22aad365d1252475c66cdefa8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 228, + 442, + 304, + 504 + ], + "blocks": [ + { + "bbox": [ + 228, + 442, + 304, + 504 + ], + "lines": [ + { + "bbox": [ + 228, + 442, + 304, + 504 + ], + "spans": [ + { + "bbox": [ + 228, + 442, + 304, + 504 + ], + "type": "image", + "image_path": "49aa7f6eea2b488de6007811ec74e7125803de849e677ead4a356be63e1d3d17.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 442, + 381, + 504 + ], + "blocks": [ + { + "bbox": [ + 307, + 442, + 381, + 504 + ], + "lines": [ + { + "bbox": [ + 307, + 442, + 381, + 504 + ], + "spans": [ + { + "bbox": [ + 307, + 442, + 381, + 504 + ], + "type": "image", + "image_path": "fd36dd7301032ff0a40f1dd187ce9e5027a7dfd89ff95854cd039651889d1b0d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 386, + 442, + 460, + 504 + ], + "blocks": [ + { + "bbox": [ + 386, + 442, + 460, + 504 + ], + "lines": [ + { + "bbox": [ + 386, + 442, + 460, + 504 + ], + "spans": [ + { + "bbox": [ + 386, + 442, + 460, + 504 + ], + "type": "image", + "image_path": "3c29c8dd5ab4adbf5915fbfd3e6f44dbee77cedbf231bae611128ee005d47ba6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 463, + 442, + 539, + 504 + ], + "blocks": [ + { + 
"bbox": [ + 463, + 442, + 539, + 504 + ], + "lines": [ + { + "bbox": [ + 463, + 442, + 539, + 504 + ], + "spans": [ + { + "bbox": [ + 463, + 442, + 539, + 504 + ], + "type": "image", + "image_path": "2c2bdea6917b4da14b0cfaa830be0cc38860e78457896c6a78d058c5db5e611e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 545, + 543, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 545, + 543, + 606 + ], + "spans": [ + { + "bbox": [ + 67, + 545, + 543, + 606 + ], + "type": "text", + "content": "In Fig. 4, we additionally show the performance of our recipe vs. the original CLIP recipe across L/14 models trained with 120K steps (one-third schedule), 240K steps (two-thirds schedule), and 360K steps (full ablation schedule). All models are their own training runs with full learning rate annealing and the progressive resolution schedule adjusted proportionally. We see nearly linear trends for our recipe on most datasets. This suggests we can train longer for more performance, even at L scale and with 24B samples seen already." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 72, + 616, + 148, + 677 + ], + "blocks": [ + { + "bbox": [ + 72, + 616, + 148, + 677 + ], + "lines": [ + { + "bbox": [ + 72, + 616, + 148, + 677 + ], + "spans": [ + { + "bbox": [ + 72, + 616, + 148, + 677 + ], + "type": "image", + "image_path": "553115ec45af9f6b65240ce997ff35a15124c9f853156f60dd5b3a711732ed88.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 684, + 543, + 717 + ], + "lines": [ + { + "bbox": [ + 67, + 684, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 684, + 543, + 717 + ], + "type": "text", + "content": "Figure 4 Scaling Behavior (Training Steps). 
Results before and after our recipe changes for an L/14 model trained with 120K, 240K, and 360K steps, adjusting the learning rate and progressive resolution schedules accordingly. Despite our recipe being much stronger than the original, there is still room for further improvement by training longer." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 150, + 616, + 226, + 677 + ], + "blocks": [ + { + "bbox": [ + 150, + 616, + 226, + 677 + ], + "lines": [ + { + "bbox": [ + 150, + 616, + 226, + 677 + ], + "spans": [ + { + "bbox": [ + 150, + 616, + 226, + 677 + ], + "type": "image", + "image_path": "4196311b231b92cff14d1bbf10a6730543bbd841e4e2ac323e2216df1265ffdd.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 230, + 616, + 304, + 677 + ], + "blocks": [ + { + "bbox": [ + 230, + 616, + 304, + 677 + ], + "lines": [ + { + "bbox": [ + 230, + 616, + 304, + 677 + ], + "spans": [ + { + "bbox": [ + 230, + 616, + 304, + 677 + ], + "type": "image", + "image_path": "82b6d68c90e01f9d4518fb5aa8723e794719fc68e52e2d11a08233decc536e99.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 307, + 616, + 383, + 677 + ], + "blocks": [ + { + "bbox": [ + 307, + 616, + 383, + 677 + ], + "lines": [ + { + "bbox": [ + 307, + 616, + 383, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 616, + 383, + 677 + ], + "type": "image", + "image_path": "f6fe4fbb9514dad209e0d83d6a694d7480e58bbba06ff60c92091033b5e14532.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 386, + 616, + 461, + 677 + ], + "blocks": [ + { + "bbox": [ + 386, + 616, + 461, + 677 + ], + "lines": [ + { + "bbox": [ + 386, + 616, + 461, + 677 + ], + "spans": [ + { + "bbox": [ + 386, + 616, 
+ 461, + 677 + ], + "type": "image", + "image_path": "1f522eeb6904381c6bc6ab80156024d78351fcf08325c0327bae44b86bea83f4.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 463, + 616, + 539, + 677 + ], + "blocks": [ + { + "bbox": [ + 463, + 616, + 539, + 677 + ], + "lines": [ + { + "bbox": [ + 463, + 616, + 539, + 677 + ], + "spans": [ + { + "bbox": [ + 463, + 616, + 539, + 677 + ], + "type": "image", + "image_path": "9f90e426884e9ecb7989d4a28e0d99bfb2443af89b14067a3357237e5afd2003.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 63, + 400, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 63, + 400, + 77 + ], + "spans": [ + { + "bbox": [ + 67, + 63, + 400, + 77 + ], + "type": "text", + "content": "2.2 Bootstrapping a Video Data Engine with Perception Encoder" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 83, + 219, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 83, + 219, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 83, + 219, + 285 + ], + "type": "text", + "content": "With a robust image pretraining recipe settled and its scaling behavior confirmed, our next step is to extend the image-only encoder to accommodate video and build a unified image-video model. Unlike web-scale image-text data, which comes in many cases with human-generated descriptive alt-text information, videos with aligned language annotation are inherently scarce. 
High-quality human-annotated captions for videos are even rarer. This scarcity presents a unique and significant challenge in training encoders capable of effectively processing video inputs." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 235, + 102, + 542, + 217 + ], + "blocks": [ + { + "bbox": [ + 235, + 102, + 542, + 217 + ], + "lines": [ + { + "bbox": [ + 235, + 102, + 542, + 217 + ], + "spans": [ + { + "bbox": [ + 235, + 102, + 542, + 217 + ], + "type": "image", + "image_path": "16aa33f83b9a7e2879ae72919ee8bd13e2c641010eaf3774f481b59e4a45d689.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 231, + 221, + 542, + 277 + ], + "lines": [ + { + "bbox": [ + 231, + 221, + 542, + 277 + ], + "spans": [ + { + "bbox": [ + 231, + 221, + 542, + 277 + ], + "type": "text", + "content": "Figure 5 Video Data Engine. To create aligned video-text data for contrastive training, we use a PE-based video captioner [21] to generate a holistic video caption and an image-level captioner [82] on sampled frames. We then provide those captions as well as the original video metadata to text-only LLM [82] to synthesize a single short, aligned caption optimal for contrastive training." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 285, + 542, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 542, + 334 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 542, + 334 + ], + "type": "text", + "content": "Inspired by the recent success of image data engines [58, 64, 96, 111, 151], we extend this concept to develop a robust video data engine that generates well-aligned synthetic captions for a diverse set of videos, facilitating the training of a video encoder. This innovative approach represents the first large-scale exploration of its kind. 
In the following sections, we introduce the process of building our video data engine." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 339, + 543, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 339, + 543, + 401 + ], + "spans": [ + { + "bbox": [ + 67, + 339, + 543, + 401 + ], + "type": "text", + "content": "To bootstrap our contrastive video finetuning, we focus on synthesizing video captions. We build our data engine in three stages: (1) we create a strong baseline video captioner, which we call the Perception Language Model (PLM), described in [21]; (2) we add additional high quality video data with human-refined captions to further enhance the captioner's quality; (3) we refine and summarize the generated video captions with an LLM to construct a large video dataset to use for the contrastive video finetuning of our Perception Encoder." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 408, + 543, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 543, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 543, + 458 + ], + "type": "text", + "content": "Phase 1: Base Video Captioner (PLM). We build our data engine on an early version of PLM [21], a multimodal large language model with PE as the vision encoder and Llama [82] as the language decoder. We train PLM on a large-scale collection of open-access image and video datasets [21]. In total, the training dataset consists of 64.7M images and videos covering natural images, charts, documents, exocentric and egocentric videos." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 466, + 290, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 466, + 290, + 551 + ], + "spans": [ + { + "bbox": [ + 67, + 466, + 290, + 551 + ], + "type": "text", + "content": "Phase 2: PLM + Refined Data. 
To further boost captioning performance, we collect a set of 265K videos (105K from PVD which we release, see §2.3), caption them with our base PLM model, and ask human raters to refine the captions1. We then fine-tune our base PLM model with this data, significantly improving captioning quality (see Tab. 1)." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 305, + 466, + 537, + 502 + ], + "blocks": [ + { + "bbox": [ + 305, + 466, + 537, + 502 + ], + "lines": [ + { + "bbox": [ + 305, + 466, + 537, + 502 + ], + "spans": [ + { + "bbox": [ + 305, + 466, + 537, + 502 + ], + "type": "table", + "html": "
CaptionerAuroraCap [13]VCG Diverse [87]VCG Bench [86] Score
ScoreAccScoreAcc
PLM2.251.93.165.134.3
PLM + Human-Refined Data3.471.13.679.435.2
", + "image_path": "754240b0fdfb6b195dadcbc2f6c7fd2fc9c772c307dec34acb8e1c27fc16616c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 505, + 543, + 550 + ], + "lines": [ + { + "bbox": [ + 302, + 505, + 543, + 550 + ], + "spans": [ + { + "bbox": [ + 302, + 505, + 543, + 550 + ], + "type": "text", + "content": "Table 1 Video Captioning. We use an early version of PLM-8B [21], consisting of our image-only PE encoder and a Llama decoder, for captioning. Adding human-refined data greatly boosts captioning performance (higher is better)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "type": "text", + "content": "Phase 3: LLM Summarization. We synthesize the final aligned video captions by incorporating the PLM video captions, Llama 3.2 [82] image-only frame captions, and the existing video metadata of video titles and descriptions (Fig. 5). Similar to image alt-text, video metadata contains knowledge often not covered by the image and video captioning models. Thus, combining the two leads to more comprehensive captions. We summarize video captions, frame captions, and video metadata together using the Llama 3.3 70B model to provide the final captions. The prompt used to generate the summary can be found in Appendix A.1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 640, + 543, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 543, + 665 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 543, + 665 + ], + "type": "text", + "content": "Using the Engine. 
Finally, we use the resulting data engine bootstrapped with an image-only checkpoint of PE to generate well-aligned, information-dense captions for a diverse set of 22M videos for contrastive finetuning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 673, + 543, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 543, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 543, + 698 + ], + "type": "text", + "content": "Training with Recaptioned Videos. Our goal is to develop a unified image and video encoder. To encode videos using our existing image encoder, we uniformly sample " + }, + { + "bbox": [ + 67, + 673, + 543, + 698 + ], + "type": "inline_equation", + "content": "N = 8" + }, + { + "bbox": [ + 67, + 673, + 543, + 698 + ], + "type": "text", + "content": " frames from video clips and extract frame-level" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 704, + 419, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 704, + 419, + 715 + ], + "spans": [ + { + "bbox": [ + 78, + 704, + 419, + 715 + ], + "type": "text", + "content": "1The annotators are instructed to remove, correct, and add information from the captions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "text", + "content": "embeddings with the image encoder. 
We then apply average pooling over these frame embeddings to obtain video embeddings, which are used for contrastive learning with encoded video captions by the text encoder. Despite being extremely simple, we find this technique surprisingly effective in producing a strong joint image-video encoder. We share this finding with previous studies [19, 84], which note that simple average pooling outperforms more complex pooling strategies like attention-based compression for video." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 133, + 265, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 265, + 324 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 265, + 324 + ], + "type": "text", + "content": "Ablations. In Tab. 2, we conduct an ablation study on the components of the video data engine by finetuning an intermediate image-only checkpoint on 17M of the 22M videos recaptioned by our video data engine. The results show that the video data engine significantly enhances zero-shot classification and retrieval performance for both image and video benchmarks, compared to the image-only baseline encoder (first row). Notably, using the video data engine's video-level and frame-level captions provides significant improvements over relying solely on metadata such as video title and description (second row), highlighting the importance of building a robust video data engine to compensate for noise in web videos." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 282, + 137, + 539, + 224 + ], + "blocks": [ + { + "bbox": [ + 282, + 137, + 539, + 224 + ], + "lines": [ + { + "bbox": [ + 282, + 137, + 539, + 224 + ], + "spans": [ + { + "bbox": [ + 282, + 137, + 539, + 224 + ], + "type": "table", + "html": "
TitleDescriptionVideo CaptionFrame CaptionAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet vial [26]ImageNet vial [12]ObjectNet IV Classes [4]MS-COCO mrt→img [76]MS-COCO mrt→mrt [76]Average VideoKinetics 400 [55]Kinetics 400 [55]MSR-VTT mrt→vid [153]MSR-VTT mrt→mrt [153]48.1
72.683.377.885.849.466.850.969.768.438.027.3
75.483.278.287.147.366.056.074.173.539.037.3
78.283.578.486.856.074.360.973.873.447.648.8
✓*78.183.779.087.754.173.060.975.475.146.746.5
78.283.779.087.554.673.261.675.875.547.448.1
", + "image_path": "98121d3bed5a35310ba152c9861be31ac69dd8a0d1f018191a8a6603f9f86662.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 279, + 227, + 542, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 227, + 542, + 316 + ], + "spans": [ + { + "bbox": [ + 279, + 227, + 542, + 316 + ], + "type": "text", + "content": "Table 2 Video Data Engine Ablation. We ablate our video data engine in Fig. 5 by finetuning on an in-development image-only version of PE by averaging the frame embeddings to create a single video CLIP embedding. Video captions are generated by PLM trained with or without * human-refined data (see §2.3). Frame captions are generated by the Llama 3.2 vision model. Each component helps on different metrics, overall culminating in a huge boost to both image and video zero-shot performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 325, + 543, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 325, + 543, + 350 + ], + "spans": [ + { + "bbox": [ + 67, + 325, + 543, + 350 + ], + "type": "text", + "content": "Our analysis reveals that the most critical components are the video metadata and PLM's video caption; however, all components are necessary to achieve peak performance in our video data engine." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 354, + 543, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 543, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 543, + 392 + ], + "type": "text", + "content": "In Fig. 6, we investigate the impact of scaling recaptioned video data on a later checkpoint of the same image-only model as in Fig. 2. Notably, scaling synthetic video data demonstrates consistent improvement in both image and video benchmarks. Full results of this scaling experiment can be found in the Appendix 19." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": "In the top row, scaling synthetic video data consistently improves performance on image benchmarks, with monotonic improvements of " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "+1.1\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " in ObjectNet and " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "+1.6\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " in ImageNet Adversarial. ImageNet val and ImageNet v2 have smaller gains, with accuracy increases of " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "0.3\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": ", plateauing at " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "\\sim 7\\mathrm{M}" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " samples. We also observe a significant boost to zero-shot retrieval (here, COCO [76]) of " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "+3.8\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "inline_equation", + "content": "+4.1\\%" + }, + { + "bbox": [ + 67, + 396, + 543, + 445 + ], + "type": "text", + "content": " top-1 recall." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "spans": [ + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "content": "The video tasks listed in the bottom row demonstrate a consistent story. We observe a significant jump in performance between none and 3M videos across all video classification tasks, indicating that there is a domain gap for image-only models that hinders their ability to perform well on video out of the box. Further scaling synthetic video data leads to substantial performance gains in both video classification and retrieval. Video classification accuracy improves consistently by " + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "inline_equation", + "content": "+5.6\\%" + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "inline_equation", + "content": "+11.7\\%" + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "content": " without plateauing, while video retrieval shows significant improvements of " + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "inline_equation", + "content": "+7.7" + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "inline_equation", + "content": "+15.3" + }, + { + "bbox": [ + 67, + 450, + 543, + 522 + ], + "type": "text", + "content": " top-1 recall." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 75, + 538, + 536, + 683 + ], + "blocks": [ + { + "bbox": [ + 75, + 538, + 536, + 683 + ], + "lines": [ + { + "bbox": [ + 75, + 538, + 536, + 683 + ], + "spans": [ + { + "bbox": [ + 75, + 538, + 536, + 683 + ], + "type": "image", + "image_path": "d4b5d63b0451aae57d67aaa102f3b7248bed9096bcbf90710a982c38dc6e97d3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 689, + 542, + 711 + ], + "lines": [ + { + "bbox": [ + 67, + 689, + 542, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 542, + 711 + ], + "type": "text", + "content": "Figure 6 Video Data Scaling. Finetuning on videos recaptioned by the PE video data engine from 0M (baseline image-only model) to 17M samples consistently improves both image and video performance, both classification and retrieval." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 95, + 538, + 152 + ], + "blocks": [ + { + "bbox": [ + 72, + 95, + 538, + 152 + ], + "lines": [ + { + "bbox": [ + 72, + 95, + 538, + 152 + ], + "spans": [ + { + "bbox": [ + 72, + 95, + 538, + 152 + ], + "type": "image", + "image_path": "9ec94d9a6be8e905327609a5476b63a26195687f800eb8a20f47f4a56e5d1b8b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 155, + 353, + 164 + ], + "lines": [ + { + "bbox": [ + 257, + 155, + 353, + 164 + ], + "spans": [ + { + "bbox": [ + 257, + 155, + 353, + 164 + ], + "type": "text", + 
"content": "Category: Food Preparation" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 245, + 542, + 289 + ], + "lines": [ + { + "bbox": [ + 67, + 245, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 245, + 542, + 289 + ], + "type": "text", + "content": "Figure 7 PE Video Dataset Example. A sample from PVD, our released video-text dataset. Initial captions are generated by our video captioning model and then refined by human annotators. Annotators are instructed to add details and remove model hallucination. In this example, the model hallucination \"a spoon\" is removed; and more details such as \"glass bowl\" and the action \"scraping\" are added. See Appendix Fig. 18 for more." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 168, + 294, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 168, + 294, + 236 + ], + "spans": [ + { + "bbox": [ + 78, + 168, + 294, + 236 + ], + "type": "text", + "content": "Model caption: \"The video shows a person making a cake. The person is seen mixing ingredients in a bowl, pouring the mixture into a pan, and adding chocolate chips. The video also shows the person using a whisk and a spatula to mix the ingredients and pour the mixture into the pan. The person is wearing a blue shirt and is seen holding a glass bowl and a spoon. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 309, + 168, + 535, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 168, + 535, + 237 + ], + "spans": [ + { + "bbox": [ + 309, + 168, + 535, + 237 + ], + "type": "text", + "content": "Human refinement: \"The video shows a person making a chocolate cake. The person mixes ingredients in a glass bowl and pours the mixture into a baking pan. 
Then, the person adds chocolate chips on top. The video also shows the person mixing the ingredients with an electric whisk and scraping the mixture into the pan with a spatula. The person is wearing a blue shirt and is seen holding a glass bowl. The background of the video is a white marble countertop, and there is a striped towel on the counter.\"" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 311, + 543, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 311, + 543, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 311, + 543, + 348 + ], + "type": "text", + "content": "These experiments highlight the quality of our video data engine and its ability to significantly improve encoder performance, even with only a relatively modest 17M videos compared to the billions of images seen during pretraining. Our video data engine is a vital component in build a strong, unified image-video encoder." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 361, + 218, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 218, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 218, + 373 + ], + "type": "text", + "content": "2.3 PE Video Dataset (PVD)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 381, + 543, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 381, + 543, + 418 + ], + "spans": [ + { + "bbox": [ + 67, + 381, + 543, + 418 + ], + "type": "text", + "content": "For the benefit of the community, we release a new video dataset: PE Video Dataset (PVD).2 PVD comprises of 1M high-quality and diverse videos with accompanying tags and descriptions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 424, + 542, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 424, + 542, + 484 + ], + "spans": [ + { + "bbox": [ + 67, + 424, + 542, + 484 + ], + "type": "text", + "content": "We additionally select 120K of these videos with the highest degree of motion to annotate with detailed captions by generating synthetic captions using our video captioner (§2.2) and employing 200 annotators to verify and refine them. We ask the human annotators to improve the synthetic captions by removing any hallucinations, correcting words that describe the video inaccurately, eliminating repetitive or redundant words to make the caption more concise, and adding any missing actions being performed in the video." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 490, + 407, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 490, + 407, + 561 + ], + "spans": [ + { + "bbox": [ + 67, + 490, + 407, + 561 + ], + "type": "text", + "content": "We release two versions of annotations for the 120K PVD subset: (1) Human verified captions: extended summaries with an average length of 57.1 words that provide a high-level description of each video. These captions are suitable for CLIP-style training. (2) Long automated captions: detailed and fine-grained descriptions with an average length of 111.7 words that capture spatial and temporal events. These captions are ideal for fine-grained video understanding." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 422, + 494, + 539, + 548 + ], + "blocks": [ + { + "bbox": [ + 422, + 494, + 539, + 548 + ], + "lines": [ + { + "bbox": [ + 422, + 494, + 539, + 548 + ], + "spans": [ + { + "bbox": [ + 422, + 494, + 539, + 548 + ], + "type": "table", + "html": "
Videos998,862
Human Captions118,862
Total Duration4625 hrs
Duration (s)16.7±9.8
Human Caption Length57.1±25.4
Model Caption Length111.7±43.2
", + "image_path": "0419cec4c68d3e21eb66b1cc748ebcb2ae14852eccf08775be3e31fa04f72c48.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 435, + 550, + 527, + 559 + ], + "lines": [ + { + "bbox": [ + 435, + 550, + 527, + 559 + ], + "spans": [ + { + "bbox": [ + 435, + 550, + 527, + 559 + ], + "type": "text", + "content": "Table 3 PVD Statistics." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "spans": [ + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "text", + "content": "In Fig. 7, we visualize a video example together with their model and human captions from PE Video Dataset (See Fig. 18 for more). The dataset statistics are summarized in Tab. 3. Finally, We use " + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "inline_equation", + "content": "105\\mathrm{K}" + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "text", + "content": " of these refined samples to improve the data engine (" + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "inline_equation", + "content": "\\S 2.2" + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "text", + "content": " phase 2) and " + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "inline_equation", + "content": "15\\mathrm{K}" + }, + { + "bbox": [ + 67, + 567, + 542, + 604 + ], + "type": "text", + "content": " as a high-quality video retrieval benchmark." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 613, + 543, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 543, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 543, + 696 + ], + "type": "text", + "content": "PVD Benchmark. 
We use 15K of the human-refined video-caption pairs as a held-out test set, which we introduce as a new video retrieval benchmark, PVD Benchmark, to evaluate finegrained video-caption alignment. We follow the format of MSR-VTT [153] to construct the benchmark. We select videos from 10 different categories, including hand actions, object interactions, food preparation, work activities, outdoor scenes, animals, water scenes, object handling, close-up shots, and nature scenes, with an overall average caption length of 51.7 words (see Appendix A.2.3 for statistics). We use PVD Benchmark to evaluate SigLIP [160], SigLIP2 [138], InternVL [19], and PE models, and the results can be found in Tab. 7." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 704, + 301, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 704, + 301, + 715 + ], + "spans": [ + { + "bbox": [ + 78, + 704, + 301, + 715 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 704, + 301, + 715 + ], + "type": "text", + "content": "PVD available at https://ai.meta.com/datasets/pe-video/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 63, + 290, + 76 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 63, + 290, + 76 + ], + "spans": [ + { + "bbox": [ + 67, + 63, + 290, + 76 + ], + "type": "text", + "content": "2.4 A Unified Encoder for Image and Video" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 83, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 83, + 541, + 106 
+ ], + "spans": [ + { + "bbox": [ + 67, + 83, + 541, + 106 + ], + "type": "text", + "content": "Using a robust, scalable image pretraining recipe and video-pretraining data recaptioned by the proposed video data engine, in this section we present " + }, + { + "bbox": [ + 67, + 83, + 541, + 106 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 83, + 541, + 106 + ], + "type": "text", + "content": " , a unified image-and-video encoder." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 116, + 335, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 116, + 335, + 176 + ], + "spans": [ + { + "bbox": [ + 67, + 116, + 335, + 176 + ], + "type": "text", + "content": "Model Architecture. To capitalize on the promising scaling behavior observed in §2.1, we scale the largest " + }, + { + "bbox": [ + 67, + 116, + 335, + 176 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 116, + 335, + 176 + ], + "type": "text", + "content": " model to 2B parameters3 (G scale). Tab. 4 shows the detailed model configuration of the vision and text transformers and the dimension of the output clip embedding space." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 353, + 115, + 541, + 177 + ], + "blocks": [ + { + "bbox": [ + 353, + 115, + 541, + 177 + ], + "lines": [ + { + "bbox": [ + 353, + 115, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 353, + 115, + 541, + 177 + ], + "type": "table", + "html": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP Dim
BVision0.09B768123072121024
Text0.31B102424409616
LVision0.32B1024244096161024
Text0.31B102424409616
GVision1.88B1536508960161280
Text0.47B128024512020
", + "image_path": "84e3e8bf18b5ba8bbb0729dc244ad4d6daf947726233ee1ff38ac64927363783.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 380, + 179, + 511, + 190 + ], + "lines": [ + { + "bbox": [ + 380, + 179, + 511, + 190 + ], + "spans": [ + { + "bbox": [ + 380, + 179, + 511, + 190 + ], + "type": "text", + "content": "Table 4 PE Model Configurations." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 185, + 336, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 185, + 336, + 197 + ], + "spans": [ + { + "bbox": [ + 67, + 185, + 336, + 197 + ], + "type": "text", + "content": "Smaller Model Distillation. To maximize the performance of" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 198, + 542, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 198, + 542, + 258 + ], + "spans": [ + { + "bbox": [ + 66, + 198, + 542, + 258 + ], + "type": "text", + "content": "smaller models (B and L scales in Tab. 4), we employ a distillation finetuning approach [49] using " + }, + { + "bbox": [ + 66, + 198, + 542, + 258 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 198, + 542, + 258 + ], + "type": "text", + "content": " as the teacher. This process involves a short finetuning schedule where both the student and teacher models encode image and text inputs separately to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence, distilling multimodal relational knowledge from the teacher into the student." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "text", + "content": "Notably, we find that using a smaller softmax temperature for the teacher's distributions, specifically " + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "inline_equation", + "content": "0.5 \\times" + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "text", + "content": " the temperature used for the student's distribution, significantly enhances the effectiveness of knowledge distillation. By leveraging the strong embeddings provided by " + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}} \\mathrm{G}" + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "text", + "content": ", our short distillation finetuning schedule significantly boosts the performance of both B and L scale models of " + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 262, + 541, + 312 + ], + "type": "text", + "content": " (see Appendix C.3)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 319, + 367, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 319, + 367, + 332 + ], + "spans": [ + { + "bbox": [ + 67, + 319, + 367, + 332 + ], + "type": "text", + "content": "Model Training. 
The training process of " + }, + { + "bbox": [ + 67, + 319, + 367, + 332 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 319, + 367, + 332 + ], + "type": "text", + "content": " involves three stages:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 80, + 338, + 541, + 495 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 80, + 338, + 541, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 338, + 541, + 375 + ], + "spans": [ + { + "bbox": [ + 80, + 338, + 541, + 375 + ], + "type": "text", + "content": "1. Image pretraining. We scale up image pretraining to 5.4B publicly available image alt-text pairs curated with MetaCLIP [152] and a total of 86B samples seen to ensure convergence (58B for B and L). We use a global batch size of 131K, with progressive resolution from 98 to up to 448 depending on the model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 380, + 541, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 380, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 80, + 380, + 541, + 453 + ], + "type": "text", + "content": "2. Image and video finetuning. Following the initial pretraining, we subsequently finetune the model at max resolution with a short schedule for 50M samples on the image pretraining data (as cooldown) followed by 22M samples on the recaptioned videos with a smaller learning rate and batch size. The video captions are produced using the proposed video data engine (§2.2). For each video clip, we uniformly sample 8 frames, encode them, take their average to produce a single video embedding, and align them with the corresponding video captions using the same contrastive objective in image training." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 80, + 457, + 541, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 457, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 80, + 457, + 541, + 495 + ], + "type": "text", + "content": "3. Smaller model distillation. We distill the 2B model (G scale) into smaller contrastive pretrained models at B and L scales under their final resolutions, using a short schedule that covers approximately 4B samples seen (" + }, + { + "bbox": [ + 80, + 457, + 541, + 495 + ], + "type": "inline_equation", + "content": "\\sim 8\\%" + }, + { + "bbox": [ + 80, + 457, + 541, + 495 + ], + "type": "text", + "content": " of the pretraining schedule) with a lower learning rate and no weight decay." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 499, + 402, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 499, + 402, + 512 + ], + "spans": [ + { + "bbox": [ + 67, + 499, + 402, + 512 + ], + "type": "text", + "content": "The detailed training configuration and setups are listed in Appendix B.1.1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 526, + 162, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 526, + 162, + 538 + ], + "spans": [ + { + "bbox": [ + 67, + 526, + 162, + 538 + ], + "type": "text", + "content": "2.5 Core Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "spans": [ + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "text", + "content": "Zero-Shot Image Results. In Tab. 
5, we present " + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "text", + "content": " 's performance on zero-shot image benchmarks for classification and retrieval vs. the strongest existing models, including SigLIP2 [138] and proprietary models using JFT-3B [29], which is likely tuned for ImageNet. " + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "text", + "content": " outperforms all other contrastive models across the board on all zero-shot tasks, including the highly competitive average of zero-shot ImageNet robustness metrics [4, 26, 46, 47, 112, 143]. This marks a significant achievement, as we are the first to accomplish this in over 3 years without access to Google's internal JFT-3B [29] or WebLI [17] datasets. And at the same time, " + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 549, + 542, + 645 + ], + "type": "text", + "content": " also exceeds the existing state-of-the-art on image-text retrieval and significantly improves on fine-grained classification—the first to simultaneously hold state-of-the-art on all common zero-shot categories." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "text", + "content": "By harnessing the power of our video data engine, training with a relatively small dataset of 22M videos and their corresponding synthetic captions leads to substantial gains in image benchmarks, with average general image classification improving by " + }, + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "inline_equation", + "content": "+0.6\\%" + }, + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "text", + "content": " with emphasis on more difficult benchmarks (notably " + }, + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "inline_equation", + "content": "+1.2\\%" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 694, + 541, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 694, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 694, + 541, + 715 + ], + "type": "text", + "content": "3We employ the setup described in §2.1 except for the additional class token (only used for L and B). Interestingly, we find using the same high learning rate " + }, + { + "bbox": [ + 67, + 694, + 541, + 715 + ], + "type": "inline_equation", + "content": "(2 \\times 10^{-3})" + }, + { + "bbox": [ + 67, + 694, + 541, + 715 + ], + "type": "text", + "content": " to perform well for G. We also did not find scaling the text encoder to be beneficial." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 62, + 548, + 280 + ], + "blocks": [ + { + "bbox": [ + 65, + 62, + 548, + 280 + ], + "lines": [ + { + "bbox": [ + 65, + 62, + 548, + 280 + ], + "spans": [ + { + "bbox": [ + 65, + 62, + 548, + 280 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Fine-Grained Classification
Avg Class.ImageNet w1 [26]ImageNet i2 [112]ObjectNet IV Classes [4]ImageNet adversarial [47]ImageNet Adversarial [48]ImageNet Renditions [46]ImageNet Sketch [143]Avg Fine.Food 107 [9]Flowers Oxford [97]Pets Oxford [100]Cars Stanford [59]Aircrafts FGC [88]Countries 2/1 [133]Scenes SUN397 [150]Satellite RESISC [20]Avg Retrieval1Zero-Shot Retrieval MS-COCO t+to ing [76]
Proprietary0.24B2246.6B84.385.786.380.682.385.695.776.1-95.191.297.9--------------------------------------------------0.24B2246.6B84.385.786.380.695.776.1-95.191.297.9-----------------------------------
BASIC [102]1.0B5764.8B85.786.380.695.776.1-95.191.297.9----------------------------72.651.266.380.492.585.786.380.695.776.1-------------------------------------------------MS-COCO t+to ing [76]MS-COCO img→to ing [76]MS-COCO img→to ing [75]
CoCa [158]1.0B5764.8B85.786.380.695.776.1-95.191.297.9---------------------72.651.266.380.492.585.786.380.695.776.1---0.24B2246.6B85.786.380.695.776.1-------------------------------------
LiT-22B [24]
", + "image_path": "18451512f5568fe8da5336fa21d737dcaf1979fc3235a99cb9187fc3c71d5477.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "spans": [ + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "text", + "content": "ObjectNet, " + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "inline_equation", + "content": "+1.4\\%" + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "text", + "content": " ImageNet Adversarial) and fine-grained classification by " + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "inline_equation", + "content": "+1.0\\%" + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "text", + "content": " on average. Furthermore, due to the high level of detail and alignment of our synthetic captions, zero-shot retrieval is significantly boosted by " + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "inline_equation", + "content": "+3.6\\%" + }, + { + "bbox": [ + 67, + 357, + 544, + 407 + ], + "type": "text", + "content": " on average. These results emphasize that training with well-aligned video text data does not just improve video performance—it creates a strictly better model for both videos and images." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 415, + 196, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 415, + 196, + 511 + ], + "spans": [ + { + "bbox": [ + 67, + 415, + 196, + 511 + ], + "type": "text", + "content": "Zero-Shot Video Results. 
We assess the performance of " + }, + { + "bbox": [ + 67, + 415, + 196, + 511 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 415, + 196, + 511 + ], + "type": "text", + "content": " on zero-shot video benchmarks by employing the same model as a frame-based video encoder, utilizing 8 uniformly sampled frames, as described in §2.2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 517, + 196, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 517, + 196, + 672 + ], + "spans": [ + { + "bbox": [ + 67, + 517, + 196, + 672 + ], + "type": "text", + "content": "We present the corresponding video results in Tab. 6. Our base image encoder already outperforms all other image-only encoders on both zero-shot classification and retrieval, including SigLIP2-g-opt. With video finetuning, " + }, + { + "bbox": [ + 67, + 517, + 196, + 672 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 517, + 196, + 672 + ], + "type": "text", + "content": " significantly outperforms even native video models that use full temporal attention on video classification, and nearly matches the" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 208, + 418, + 545, + 608 + ], + "blocks": [ + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "lines": [ + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "spans": [ + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "text", + "content": "Table 5 Zero-Shot Image Results. Image zero-shot performance of " + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "text", + "content": " compared to the state-of-the-art for both proprietary and open models. 
" + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "text", + "content": " is the first vision encoder to outperform the best models trained on the proprietary JFT-3B [29] and WebLI [17] on general classification. Moreover at all model sizes, " + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "text", + "content": " obtains state-of-the-art results across general classification, retrieval, and finegrained classification. " + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 67, + 282, + 544, + 338 + ], + "type": "text", + "content": "Re-evaluated: DFN by [130]; SigLIP and SigLIP2 by us with the same benchmark settings if not reported in [138] (see Appendix B.1.2)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 208, + 418, + 545, + 608 + ], + "lines": [ + { + "bbox": [ + 208, + 418, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 208, + 418, + 545, + 608 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolution #FramesVideo DataZero-Shot ClassificationZero-Shot Retrieval
Avg Class.Kinetics 409 [55]Kinetics 600 [55]Kinetics 700 [55]UCF 101 [126]HMDB 57 [62]Avg RetrievalMSR-VTT 304 [76]MSR-VTT 304 [76]MSVD 304 [76]MSVD 304 [76]MSVD 304 [76]ActivityNet 304 [76]ActivityNet 304 [76]
B Scale
CLIP [106]0.1B2248n/a54.358.455.146.168.943.229.230.424.240.557.29.113.2
CLIP4CLIP [84]0.1B22412n/a-------32.0-38.5---
SigLIP2-B/16†[138]0.1B2248n/a57.358.755.048.482.042.339.938.530.149.067.228.625.8
PEcoreB0.1B224822M63.965.665.155.884.648.249.947.647.350.476.739.038.4
L Scale
UMT-L [67]0.3B224825M------47.140.737.149.074.541.939.4
SigLIP2-L/16†[138]0.3B3848n/a64.165.362.556.886.749.344.741.531.453.774.235.931.5
PEcoreL0.3B336822M71.473.472.765.387.158.554.850.350.157.282.446.442.1
Unbounded Scale
InternVL [19]5.5B2248n/a-69.168.960.6---44.740.2----
InternVideo2 [146]1.0B2248102M70.773.172.864.988.853.959.951.950.958.183.360.454.8
VideoPrism-g* [164]1.1B28816619M-76.4-----39.771.0--52.750.3
SigLIP2-g-opt†[138]1.1B3848n/a68.269.867.061.890.751.846.643.134.255.874.638.333.4
PEcoreG (image only)1.9B4488n/a70.973.172.264.389.555.547.644.335.254.373.941.436.3
PEcoreG1.9B448822M74.876.976.169.190.761.158.751.249.959.785.454.751.2
", + "image_path": "2e35a5b4cdd10463a1e04015eaa50ce24ce1e8fd08ad30843e4c022ac3a800c4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "lines": [ + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "spans": [ + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "text", + "content": "Table 6 Zero-Shot Video Results. Video performance of " + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "text", + "content": " compared to recent video and image encoders. " + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "text", + "content": " obtains state-of-the-art in video classification and comparable performance on retrieval benchmarks while using only 22M videos. " + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "inline_equation", + "content": "^*" + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "text", + "content": " Proprietary models. " + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "inline_equation", + "content": "{}^{+}\\mathrm{SigLIP2}" + }, + { + "bbox": [ + 208, + 609, + 543, + 666 + ], + "type": "text", + "content": " are evaluated by us with the same zero-shot prompts frame embedding averaging strategy (as in [19, 84, 106]). See Appendix B.1.2." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "text", + "content": "state-of-the-art on video retrieval using a simple frame-level encoder. 
This result underscores the importance of our video data engine, resulting in " + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "inline_equation", + "content": "+3.9\\%" + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "text", + "content": " on average zero-shot video classification, and a massive " + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "inline_equation", + "content": "+11.1\\%" + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "text", + "content": " on retrieval. Moreover, " + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 673, + 544, + 721 + ], + "type": "text", + "content": " does this with much less video data compared to other video-based approaches like InternVideo2 [146] and VideoPrism [164], highlighting the benefits of a joint image-video encoder." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 61, + 337, + 173 + ], + "blocks": [ + { + "bbox": [ + 75, + 61, + 337, + 173 + ], + "lines": [ + { + "bbox": [ + 75, + 61, + 337, + 173 + ], + "spans": [ + { + "bbox": [ + 75, + 61, + 337, + 173 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolutionDataZero-Shot ClassificationZero-Shot Retrieval
ObjectNet [4]ObjectNet [4]Inaturalist 2017 [140]Dollar St 58 [39, 113]TextCaps img→cat [122]TextCaps Flip img→cat [122]PVD Bench img→vidPVD Bench vid→cat
SigLIP2-B/16 [138]0.1B22410B73.659.116.955.972.069.853.960.1
PEcore B0.1B2245.4B71.958.325.952.172.371.959.861.1
SigLIP2-L/16 [138]0.3B38410B84.473.226.757.678.076.261.967.1
PEcore L0.3B3365.4B84.774.335.359.678.578.364.765.2
InternVL-C [19]5.5B2245B80.667.219.458.272.367.863.465.1
SigLIP2-g-opt [138]1.1B38410B88.078.131.559.378.876.962.567.1
PEcore G1.9B4485.4B88.279.041.162.378.878.777.076.6
", + "image_path": "a153733586205ebf63f98fe5ca0ce22decc28af55d3f7589bc0d01e7ddca09b3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 369, + 62, + 541, + 163 + ], + "blocks": [ + { + "bbox": [ + 67, + 175, + 345, + 209 + ], + "lines": [ + { + "bbox": [ + 67, + 175, + 345, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 345, + 209 + ], + "type": "text", + "content": "Table 7 Additional Zero-Shot Results. We present several additional zero-shot benchmarks from existing datasets and our own PVD (§2.3) to address evaluation gaps left by standard benchmarks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 369, + 62, + 541, + 163 + ], + "lines": [ + { + "bbox": [ + 369, + 62, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 369, + 62, + 541, + 163 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolutionDataEncoder Probing
ImageNet [26]ImageNet [26]ImageNet [26] Attention
DINOv2-g [98]1.1B224145M83.586.5\\( 87.2^{\\dagger} \\)
RADIOv2.5-g [45]1.1B518-85.3--
AIMv2 3B [37]2.7B4487.2B--89.5
InternVL-C [19]5.5B2245B-88.2-
EVA 18B [130]17.5B2242B-88.9-
\\( PE_{core}G \\)1.9B4485.4B86.889.589.8
", + "image_path": "b442b6245b11605dc45b684c129fb444053fa41af40d35c15539a8fc5181254b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 368, + 165, + 543, + 210 + ], + "lines": [ + { + "bbox": [ + 368, + 165, + 543, + 210 + ], + "spans": [ + { + "bbox": [ + 368, + 165, + 543, + 210 + ], + "type": "text", + "content": "Table 8 Encoder Probing Results. We evaluate " + }, + { + "bbox": [ + 368, + 165, + 543, + 210 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 368, + 165, + 543, + 210 + ], + "type": "text", + "content": " G's frozen features using the typical probing methods to compare to models without zero-shot support. from [37]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 230, + 544, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 230, + 544, + 268 + ], + "spans": [ + { + "bbox": [ + 66, + 230, + 544, + 268 + ], + "type": "text", + "content": "Additional Zero-Shot Benchmarks. We further evaluate " + }, + { + "bbox": [ + 66, + 230, + 544, + 268 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 230, + 544, + 268 + ], + "type": "text", + "content": " on an additional set of zero-shot classification and retrieval benchmarks we construct in Tab. 7 to address key gaps in common benchmarks. For comparison, we also evaluate SigLIP2 [138] and InternVL-C [19] on these benchmarks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "spans": [ + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": "First, we note that the version of ObjectNet [4] that is standard to benchmark robustness (e.g., in Tab. 5) is not the full set. 
ObjectNet consists of 313 classes of objects in challenging and uncommon orientations, locations, and viewpoints. However, the standard version used for benchmarking is a 113 class subset of classes that overlap with ImageNet-1k [26]. Naturally, benchmarking in this way rewards performing well on ImageNet classes over generality. To remove this bias, we construct the full ObjectNet set with all classes and compare to the reduced ObjectNet set in Tab. 7. Surprisingly, we find that while " + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": " performs " + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "inline_equation", + "content": "+7.6\\%" + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": " over InternVL-C and only " + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "inline_equation", + "content": "+0.2\\%" + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": " over SigLIP2-g-opt on the reduced ObjectNet set, it performs " + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "inline_equation", + "content": "+11.8\\%" + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": " over InternVL-C and " + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "inline_equation", + "content": "+0.9\\%" + }, + { + "bbox": [ + 66, + 272, + 544, + 369 + ], + "type": "text", + "content": " over SigLIP2-g-opt on the full set of classes, highlighting PE's generality." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "spans": [ + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": "Next, we include iNaturalist [140] as a zero-shot benchmark because of its level of specificity with 2,101 fine-grained long-tail classes. " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " outperforms the next best SigLIP2-g-opt model by " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "+9.6\\%" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " , emphasizing PE's long tail knowledge. We then evaluate PE's cultural diversity on Dollar Street " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "[113]^4" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " , which consists of images of under-represented populations. Here too we find " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " to outperform existing methods, with " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "+3.0\\%" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " over SigLIP2-g-opt. Further, we test OCR performance by setting up TextCaps [122] as a retrieval dataset. 
Notably, " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " performs on par or better than SigLIP, which is known for good OCR performance. This is potentially surprising, as the horizontal flip augmentation we used during robust pretraining (S2.1) is typically thought to hurt OCR performance. However, instead it seems to have given " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " the ability to read backwards: we test the same TextCaps retrieval but with all images horizontally flipped. Other models suffer from this, but " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " 's performance only drops by " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " . Finally, we evaluate " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " on the PVD benchmark (S2.3), a challenging video retrieval task on 15K diverse and human-refined videos. 
Here, " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " significantly outperforms InternVL [19] by " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "+13.6\\%" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " on text " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " video and " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "+9.5\\%" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " to SigLIP2 [138] on video " + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 66, + 373, + 544, + 518 + ], + "type": "text", + "content": " text." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "spans": [ + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "text", + "content": "Frozen Encoder Probing Results. To compare against models that are not capable of zero-shot classification, we additionally evaluate " + }, + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "text", + "content": " using k nearest neighbors (following [98]), linear probing (following [19]), and attention probing (following [37]) on top of the ImageNet-1k [26] train set. We present these results in Tab. 8 and compare to other encoders using their reported numbers. 
In every case, " + }, + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 527, + 544, + 588 + ], + "type": "text", + "content": " outperforms all existing open encoders, including those with significantly more parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 596, + 544, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 596, + 544, + 645 + ], + "spans": [ + { + "bbox": [ + 66, + 596, + 544, + 645 + ], + "type": "text", + "content": "Summary. " + }, + { + "bbox": [ + 66, + 596, + 544, + 645 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 596, + 544, + 645 + ], + "type": "text", + "content": " , a unified image-video encoder, achieves state-of-the-art performance across zero-shot classification and retrieval on both images and videos on a wide variety of benchmarks. This synergy is made possible by our robust image pretraining recipe (§2.1) and powerful video data engine (§2.2), which together enable the model to effectively leverage the strengths of both image and video data at scale." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 712, + 424, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 712, + 424, + 723 + ], + "spans": [ + { + "bbox": [ + 78, + 712, + 424, + 723 + ], + "type": "text", + "content": "4We use the version provided by [39] and re-evaluate all models to ensure a fair comparison." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 346, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 346, + 79 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 346, + 79 + ], + "type": "text", + "content": "3 General Features in a Contrastive Disguise" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 90, + 543, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 90, + 543, + 126 + ], + "spans": [ + { + "bbox": [ + 67, + 90, + 543, + 126 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 90, + 543, + 126 + ], + "type": "text", + "content": " puts up strong results on the tasks contrastive encoders are known for, like zero-shot classification and retrieval. But while those tasks are useful, they are only a small part of the vision ecosystem. What really matters is whether or not the features learned with our pretraining recipe are useful to downstream tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 132, + 543, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 132, + 543, + 193 + ], + "spans": [ + { + "bbox": [ + 66, + 132, + 543, + 193 + ], + "type": "text", + "content": "Today's common wisdom in the vision community cites that different pretraining methods result in features useful for different tasks: e.g., contrastive for classification, captioning for language modeling, and self-supervised learning for spatial tasks. 
To see how " + }, + { + "bbox": [ + 66, + 132, + 543, + 193 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 132, + 543, + 193 + ], + "type": "text", + "content": " stacks up against against models with different pretraining techniques, we compare its frozen features to the state-of-the-art large-scale models for captioning (AIMv2-3B [37]) and self-supervised learning (DINOv2-g [98]) on a variety of downstream tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 201, + 244, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 244, + 511 + ], + "spans": [ + { + "bbox": [ + 67, + 201, + 244, + 511 + ], + "type": "text", + "content": "Layerwise Feature Analysis. We summarize the results of our frozen feature analysis in Fig. 8 for several downstream benchmarks in 3 categories: classification, language modeling, and spatial tasks. For classification, we probe each model using a randomly initialized cross attention transformer block. For language alignment, we use the Perception Language Model (PLM) [21] frozen encoder evaluation setup, learning a projector and finetuning a decoder-only LLM (see §4), and for spatial tasks we train with several different decoders (ViTDet [72] Mask-RCNN [43] with Absolute Win [7] for detection, DPT [109] for depth, and zero-shot feature correspondence for tracking [52]). For each experiment, we sweep over the layers of the model as the optimal features are not necessarily the last [18]. In each case, we use an equivalent image size (window size for detection) of " + }, + { + "bbox": [ + 67, + 201, + 244, + 511 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 67, + 201, + 244, + 511 + ], + "type": "text", + "content": " tokens. In each plot, we normalize performance by the maximum and minimum performance across models on that task." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 521, + 243, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 243, + 700 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 243, + 700 + ], + "type": "text", + "content": "An Alignment Problem. This analysis reveals several insights. First, as expected, AIMv2 performs well at classification and the best at visual Q&A language tasks. Similarly, DINOv2 performs the well on spatial tasks like detection, depth, and even performs the best at grounding through an LLM. Then as already established by other works: DINOv2 lacks performance on OCR tasks [134]. This is no secret, but what is interesting is that its performance peaks in the middle of the network and then drops significantly by the end. And so does the performance of other models" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 258, + 205, + 541, + 601 + ], + "blocks": [ + { + "bbox": [ + 258, + 205, + 541, + 601 + ], + "lines": [ + { + "bbox": [ + 258, + 205, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 258, + 205, + 541, + 601 + ], + "type": "image", + "image_path": "f5fca3271c106d2ca387a323bc011e0ec62f8183ffeae011885fd74e2caabfaa.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "lines": [ + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "spans": [ + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "type": "text", + "content": "Figure 8 Layer Analysis. Evaluating intermediate layers as frozen features across tasks for different pretraining methods: captioning (AIMv2-3B [37], left), spatially self-supervised (DINOv2-g [98], middle), and our contrastive recipe " + }, + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "type": "inline_equation", + "content": "\\mathrm{(PE_{core}G}" + }, + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "type": "text", + "content": ", right). 
Vertical lines denote the best layer and horizontal lines the best performance across models. As expected, AIMv2 performs well on language but not spatial, and DINOv2 performs well on spatial but not language. But surprisingly, intermediate layers of " + }, + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 255, + 604, + 543, + 692 + ], + "type": "text", + "content": " perform well on both language modeling and spatial tasks." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 700, + 503, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 700, + 503, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 700, + 503, + 714 + ], + "type": "text", + "content": "for other downstream tasks (AIMv2: tracking, grounding, detection; DINOv2: VQ&A, grounding)." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "text", + "content": " exhibits similar behavior, but with unexpected results. 
Unlike the others, in earlier layers of the network " + }, + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 543, + 125 + ], + "type": "text", + "content": " performs well on all tasks, often matching or exceeding the leading models. Remarkably, PE has intermediate layers that perform near to or on par with AIMv2 for language tasks and DINOv2 for spatial tasks, despite being trained with contrastive loss. Depth estimation is particularly noteworthy, as contrastive encoders are not typically considered state-of-the-art in that area." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 129, + 543, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 129, + 543, + 202 + ], + "spans": [ + { + "bbox": [ + 66, + 129, + 543, + 202 + ], + "type": "text", + "content": "However, in almost all cases this strong performance diminishes rapidly towards the end of the network. In fact, the performance of " + }, + { + "bbox": [ + 66, + 129, + 543, + 202 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 129, + 543, + 202 + ], + "type": "text", + "content": " in the final layer is abysmal for certain tasks, such as LLM-based grounding (the reason for which will become apparent in §5). This behavior is less pronounced the closer the downstream task is to the pretraining method, suggesting an alignment problem. Specifically, a well-tuned large-scale contrastive model can learn general embeddings in the process of fitting its objective, but it fails to output them. Therefore, to reveal these embeddings, the model must be subsequently aligned to downstream tasks." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 211, + 543, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 211, + 543, + 248 + ], + "spans": [ + { + "bbox": [ + 66, + 211, + 543, + 248 + ], + "type": "text", + "content": "Analysis. The finding that pure CLIP models possess features which match the performance of state-of-the-art pretraining methods in their specialized domains is new. In fact, recent work [31] has shown the opposite—that CLIP models fail to scale on downstream tasks. We next investigate how our approach yields these results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 253, + 268, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 268, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 268, + 433 + ], + "type": "text", + "content": "To start, we perform layerwise frozen feature analysis on COCO detection. " + }, + { + "bbox": [ + 67, + 253, + 268, + 433 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 253, + 268, + 433 + ], + "type": "text", + "content": " was particularly \"peaky\" on this task in Fig. 8, with its best layer on par with DINOv2, but last layer significantly worse. We already ablated each change we made from vanilla CLIP in Fig. 2 using a ViT-L/14 model. So to retrace our steps, we run frozen feature analysis on those checkpoints. For efficiency, we perform this experiment at a lower resolution and only sample even layers. In Fig. 9, we report COCO box mAP for the last and best layers for each cumulative ablation, along with the index of the best layer. Further, we plot the layerwise performance for each change in Fig. 10." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 437, + 268, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 437, + 268, + 474 + ], + "spans": [ + { + "bbox": [ + 67, + 437, + 268, + 474 + ], + "type": "text", + "content": "Surprisingly, the simple changes we made in §2.1 to construct our pretraining recipe overall improved the best layer's performance by" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 474, + 543, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 474, + 543, + 570 + ], + "spans": [ + { + "bbox": [ + 66, + 474, + 543, + 570 + ], + "type": "text", + "content": "almost " + }, + { + "bbox": [ + 66, + 474, + 543, + 570 + ], + "type": "inline_equation", + "content": "10\\,mAP" + }, + { + "bbox": [ + 66, + 474, + 543, + 570 + ], + "type": "text", + "content": " over vanilla CLIP! Some changes like high resolution (5) and RoPE (6) improving spatial features is to be expected, but unexpectedly data augmentation (8) and especially progressive resolution (2) help considerably. It is possible that contrastive pretraining is prone to overfit to the \"global\" nature of the task through \"global tokens\" [23]. However, as the model cannot maintain global tokens in the same place due to the resolution progressively changing, it is forced to be more robust. Also of note is that both progressive resolution (2) and attention pooling (7) move the argmax layer deeper into the network (rightmost column of Fig. 9). Attention pooling in particular alters the whole shape of the layerwise performance curve (Fig. 10), while the other changes typically only raise or lower it." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 78, + 581, + 216, + 671 + ], + "blocks": [ + { + "bbox": [ + 78, + 581, + 216, + 671 + ], + "lines": [ + { + "bbox": [ + 78, + 581, + 216, + 671 + ], + "spans": [ + { + "bbox": [ + 78, + 581, + 216, + 671 + ], + "type": "image", + "image_path": "fc9e5f4a4cb1aee69d2d431bcfb675feed0a0647d32f7580603105b76e8e7e13.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 681, + 214, + 703 + ], + "lines": [ + { + "bbox": [ + 67, + 681, + 214, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 214, + 703 + ], + "type": "text", + "content": "Figure 10 Layer Analysis corresponding to the results presented in Fig. 9." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 226, + 575, + 543, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 575, + 543, + 708 + ], + "spans": [ + { + "bbox": [ + 226, + 575, + 543, + 708 + ], + "type": "text", + "content": "Potentially more interesting is what did not improve performance: specifically, increasing the batch size (3) and using LAMB with a high learning rate (4). Both of these changes explicitly help the model fit the CLIP loss better, which after a certain point may not improve the general features. Moreover, while the best layer overall improved significantly, the last layer performance stagnated after (2). This suggests that constructing the global CLIP token requires a substantial \"decoder\" (in this case, 6 layers for the final L/14 model). Although the features of this decoder are beneficial for some tasks (e.g., Visual Q&A as shown in Fig. 8), they are not general. Nevertheless, this does not prevent the model from learning general features; it merely limits their expression in the output." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 288, + 260, + 547, + 412 + ], + "blocks": [ + { + "bbox": [ + 288, + 260, + 547, + 412 + ], + "lines": [ + { + "bbox": [ + 288, + 260, + 547, + 412 + ], + "spans": [ + { + "bbox": [ + 288, + 260, + 547, + 412 + ], + "type": "image", + "image_path": "bb11b4227ea27e2b7a911634295b0442145b980ca3b98799f6c03070636667d3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 281, + 422, + 543, + 466 + ], + "lines": [ + { + "bbox": [ + 281, + 422, + 543, + 466 + ], + "spans": [ + { + "bbox": [ + 281, + 422, + 543, + 466 + ], + "type": "text", + "content": "Figure 9 The Downstream Effects of Robust Pretraining. The ViT-L/14 checkpoints from Fig. 2 evaluated as frozen features on COCO [76] using Mask R-CNN [43]. We report the last layer performance, best layer performance, and the best layer's index." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 242, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 242, + 160 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 242, + 160 + ], + "type": "text", + "content": "Scaling Behavior. Finding a simple, easily scalable vision pretraining method that produces generally useful features has been the white whale of the vision community for a while. Evidently, our robust recipe can enable contrastive pretraining to produce general features. 
So that begs the question, \"does it scale?\"" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 165, + 242, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 165, + 242, + 274 + ], + "spans": [ + { + "bbox": [ + 67, + 165, + 242, + 274 + ], + "type": "text", + "content": "We can answer this question in the same way: by performing frozen feature layer analysis of our S/14, B/14, and L/14 scaling ablation checkpoints from Fig. 3. We report the result of that analysis in Fig. 11. We also include our final " + }, + { + "bbox": [ + 67, + 165, + 242, + 274 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 165, + 242, + 274 + ], + "type": "text", + "content": " model using the same setup, but note this is an estimate as our ablation and final schedules are different." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 279, + 244, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 279, + 244, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 279, + 244, + 483 + ], + "type": "text", + "content": "Immediately, we see a stark contrast between the scaling behavior of the vanilla CLIP recipe and ours. While the vanilla recipe quickly plateaus at L scale (300M), the best layer of our robust pretraining recipe demonstrates scaling to G scale (2B) and potentially beyond—despite being trained with a decidedly non-spatially aligned global contrastive loss. However, this is the best layer. The last layer performance still stagnates for both the vanilla recipe and ours. This may be why prior work [31] finds contrastive pretraining to not scale for downstream tasks—CLIP loss obfuscates its general features even with our recipe, placing them several layers deep." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 488, + 242, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 488, + 242, + 668 + ], + "spans": [ + { + "bbox": [ + 67, + 488, + 242, + 668 + ], + "type": "text", + "content": "However, this is just for a single spatial task. To see whether the trend is consistent, we repeat this scaling analysis on a wide variety of downstream language modeling tasks using the same frozen evaluation setup as Fig. 8 and report the results in Fig. 12. Surprisingly, the simple change in pretraining recipe improves scaling for most language tasks as well—including output-side grounding (RefCOCO). Note that in this benchmarking setup, the LLM never sees videos during training so the Video Q&A per-layer results are noisy. Yet, the best layer trend is still the same." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 673, + 241, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 241, + 685 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 241, + 685 + ], + "type": "text", + "content": "Clearly, contrastive pretraining with our" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 265, + 67, + 356, + 139 + ], + "blocks": [ + { + "bbox": [ + 326, + 56, + 383, + 64 + ], + "lines": [ + { + "bbox": [ + 326, + 56, + 383, + 64 + ], + "spans": [ + { + "bbox": [ + 326, + 56, + 383, + 64 + ], + "type": "text", + "content": "Object Detection" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 265, + 67, + 356, + 139 + ], + "lines": [ + { + "bbox": [ + 265, + 67, + 356, + 139 + ], + "spans": [ + { + "bbox": [ + 265, + 67, + 356, + 139 + ], + "type": "image", + "image_path": "680e4b398d5756980de8964a501a60f2ed9e9bc97c89dc8c8f2713f06c35df5c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 359, + 67, + 442, + 139 + ], + 
"blocks": [ + { + "bbox": [ + 359, + 67, + 442, + 139 + ], + "lines": [ + { + "bbox": [ + 359, + 67, + 442, + 139 + ], + "spans": [ + { + "bbox": [ + 359, + 67, + 442, + 139 + ], + "type": "image", + "image_path": "1b0a4479ace41e4468c9d0b366523e8a1b53945a1a9b0b2119f2404c3d13aa7d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 148, + 542, + 193 + ], + "lines": [ + { + "bbox": [ + 255, + 148, + 542, + 193 + ], + "spans": [ + { + "bbox": [ + 255, + 148, + 542, + 193 + ], + "type": "text", + "content": "Figure 11 The Downstream Scalability of Robust Pretraining. Left: frozen feature layer analysis of the S/14, B/14, and L/14 models from Fig. 3 using the same setup as Fig. 9. Right: scaling behavior of the best layer for each model. Note: G is our final model and has a different schedule." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 447, + 60, + 539, + 138 + ], + "blocks": [ + { + "bbox": [ + 447, + 60, + 539, + 138 + ], + "lines": [ + { + "bbox": [ + 447, + 60, + 539, + 138 + ], + "spans": [ + { + "bbox": [ + 447, + 60, + 539, + 138 + ], + "type": "image", + "image_path": "f5558127ca340630103f112802d3339a03ea6bb487c5a14602365c00083566cf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 265, + 212, + 356, + 285 + ], + "blocks": [ + { + "bbox": [ + 337, + 201, + 372, + 209 + ], + "lines": [ + { + "bbox": [ + 337, + 201, + 372, + 209 + ], + "spans": [ + { + "bbox": [ + 337, + 201, + 372, + 209 + ], + "type": "text", + "content": "OCR Q&A" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 265, + 212, + 356, + 285 + ], + "lines": [ + { + "bbox": [ + 265, + 212, + 356, + 285 + ], + "spans": [ + { + "bbox": [ + 265, + 212, + 356, + 285 + ], + "type": "image", + "image_path": 
"f0f9c8d5c4a5a11e170bd6d017535455669fe0b3375e69c34192d6d802625bff.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 358, + 212, + 441, + 285 + ], + "blocks": [ + { + "bbox": [ + 358, + 212, + 441, + 285 + ], + "lines": [ + { + "bbox": [ + 358, + 212, + 441, + 285 + ], + "spans": [ + { + "bbox": [ + 358, + 212, + 441, + 285 + ], + "type": "image", + "image_path": "1d919e2be4972d7ee8c715e35452670a046aa1a14f011a743ec97cf94f488312.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 289, + 373, + 297 + ], + "lines": [ + { + "bbox": [ + 335, + 289, + 373, + 297 + ], + "spans": [ + { + "bbox": [ + 335, + 289, + 373, + 297 + ], + "type": "text", + "content": "Visual Q&A" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 446, + 206, + 543, + 285 + ], + "blocks": [ + { + "bbox": [ + 446, + 206, + 543, + 285 + ], + "lines": [ + { + "bbox": [ + 446, + 206, + 543, + 285 + ], + "spans": [ + { + "bbox": [ + 446, + 206, + 543, + 285 + ], + "type": "image", + "image_path": "97e0715fa950508aba5efbe4d86caa4736b44d0c0bc64e09a56362282f848505.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 265, + 300, + 356, + 373 + ], + "blocks": [ + { + "bbox": [ + 265, + 300, + 356, + 373 + ], + "lines": [ + { + "bbox": [ + 265, + 300, + 356, + 373 + ], + "spans": [ + { + "bbox": [ + 265, + 300, + 356, + 373 + ], + "type": "image", + "image_path": "8eb0b6d76f12e7e9096fe1271f2475ea64da59c454705c61c47c9dd77a4cdd00.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 377, + 372, + 386 + ], + "lines": [ + { + "bbox": [ + 335, + 377, + 372, + 386 + ], + "spans": [ + { + "bbox": [ + 335, + 377, + 372, + 386 + ], + "type": "text", + "content": 
"Captioning" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 358, + 300, + 441, + 373 + ], + "blocks": [ + { + "bbox": [ + 358, + 300, + 441, + 373 + ], + "lines": [ + { + "bbox": [ + 358, + 300, + 441, + 373 + ], + "spans": [ + { + "bbox": [ + 358, + 300, + 441, + 373 + ], + "type": "image", + "image_path": "26509c66b2d2df75bdcefbf9805f13ce7b78a3f5f1cd8a7e80b714ae026923a8.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 446, + 294, + 543, + 373 + ], + "blocks": [ + { + "bbox": [ + 446, + 294, + 543, + 373 + ], + "lines": [ + { + "bbox": [ + 446, + 294, + 543, + 373 + ], + "spans": [ + { + "bbox": [ + 446, + 294, + 543, + 373 + ], + "type": "image", + "image_path": "a52d0f7bd5611a851c47ac32cdcbebaceb3ff1ce3d7386aac43357af4d5233e1.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 265, + 388, + 356, + 460 + ], + "blocks": [ + { + "bbox": [ + 265, + 388, + 356, + 460 + ], + "lines": [ + { + "bbox": [ + 265, + 388, + 356, + 460 + ], + "spans": [ + { + "bbox": [ + 265, + 388, + 356, + 460 + ], + "type": "image", + "image_path": "60e724836714f148b095c958f2fa3c8365440358f8725695bdf36b507e2fa403.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 356, + 388, + 441, + 460 + ], + "blocks": [ + { + "bbox": [ + 356, + 388, + 441, + 460 + ], + "lines": [ + { + "bbox": [ + 356, + 388, + 441, + 460 + ], + "spans": [ + { + "bbox": [ + 356, + 388, + 441, + 460 + ], + "type": "image", + "image_path": "78c17213db120c5ebe12a05496435a8d303071b8bdb87f16e11e39ab47c4765b.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 446, + 382, + 542, + 460 + ], + 
"blocks": [ + { + "bbox": [ + 446, + 382, + 542, + 460 + ], + "lines": [ + { + "bbox": [ + 446, + 382, + 542, + 460 + ], + "spans": [ + { + "bbox": [ + 446, + 382, + 542, + 460 + ], + "type": "image", + "image_path": "20d8251e5ba4bf8d5681fa3ff5be9fcc32ccefbaa9ebd7cec8f10d02b7a25c31.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 265, + 476, + 356, + 548 + ], + "blocks": [ + { + "bbox": [ + 336, + 466, + 372, + 474 + ], + "lines": [ + { + "bbox": [ + 336, + 466, + 372, + 474 + ], + "spans": [ + { + "bbox": [ + 336, + 466, + 372, + 474 + ], + "type": "text", + "content": "Grounding" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 265, + 476, + 356, + 548 + ], + "lines": [ + { + "bbox": [ + 265, + 476, + 356, + 548 + ], + "spans": [ + { + "bbox": [ + 265, + 476, + 356, + 548 + ], + "type": "image", + "image_path": "9c8a0c3c990863113bbb03ad44d3adc6e267d088dfd26283ab9f4a70b3660758.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 358, + 476, + 441, + 548 + ], + "blocks": [ + { + "bbox": [ + 358, + 476, + 441, + 548 + ], + "lines": [ + { + "bbox": [ + 358, + 476, + 441, + 548 + ], + "spans": [ + { + "bbox": [ + 358, + 476, + 441, + 548 + ], + "type": "image", + "image_path": "ccbd47c6e6d593acf38cd94b6da64e2f459998bfa155f47040da5a83dd7caea4.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 446, + 470, + 542, + 548 + ], + "blocks": [ + { + "bbox": [ + 446, + 470, + 542, + 548 + ], + "lines": [ + { + "bbox": [ + 446, + 470, + 542, + 548 + ], + "spans": [ + { + "bbox": [ + 446, + 470, + 542, + 548 + ], + "type": "image", + "image_path": "dd821677cb7f3f76bd438b9007918a7b8f2f673bbbe2297e30d83246224039ab.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 265, + 564, + 356, + 636 + ], + "blocks": [ + { + "bbox": [ + 335, + 554, + 372, + 561 + ], + "lines": [ + { + "bbox": [ + 335, + 554, + 372, + 561 + ], + "spans": [ + { + "bbox": [ + 335, + 554, + 372, + 561 + ], + "type": "text", + "content": "Video Q&A" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 265, + 564, + 356, + 636 + ], + "lines": [ + { + "bbox": [ + 265, + 564, + 356, + 636 + ], + "spans": [ + { + "bbox": [ + 265, + 564, + 356, + 636 + ], + "type": "image", + "image_path": "c6abf374b521c762f6d0b8e1d04cb5578725fcd2cbb8f2abdab0bd9b47747a60.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 358, + 563, + 441, + 636 + ], + "blocks": [ + { + "bbox": [ + 358, + 563, + 441, + 636 + ], + "lines": [ + { + "bbox": [ + 358, + 563, + 441, + 636 + ], + "spans": [ + { + "bbox": [ + 358, + 563, + 441, + 636 + ], + "type": "image", + "image_path": "8139c17439c85304530ae62c31deae51f505bb24714eb0c4741895c57897056c.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 646, + 542, + 680 + ], + "lines": [ + { + "bbox": [ + 255, + 646, + 542, + 680 + ], + "spans": [ + { + "bbox": [ + 255, + 646, + 542, + 680 + ], + "type": "text", + "content": "Figure 12 Further Scalability Analysis. We repeat the analysis from Fig. 11 on a wide range of downstream tasks by adapting to a language model. Each category is an average of several downstream tasks (see §4)." 
+ } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 446, + 555, + 542, + 636 + ], + "blocks": [ + { + "bbox": [ + 446, + 555, + 542, + 636 + ], + "lines": [ + { + "bbox": [ + 446, + 555, + 542, + 636 + ], + "spans": [ + { + "bbox": [ + 446, + 555, + 542, + 636 + ], + "type": "image", + "image_path": "92e7132599ebc88ee01bb6b8843129af5bde6cdef2acbe9db261ddbed7c8ddd1.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "bbox": [ + 67, + 685, + 542, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 685, + 542, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 685, + 542, + 723 + ], + "type": "text", + "content": "robust recipe produces strong general features that scale. However, these features are not going to be much use stuck in the middle of the network. To remedy this, in the remaining sections we will discuss methods for aligning these general features to the output of the network for both language modeling and spatial tasks." 
+ } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 342, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 342, + 79 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 342, + 79 + ], + "type": "text", + "content": "4 Perception Encoder: Language Alignment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "spans": [ + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "text", + "content": "In §3 we have seen that " + }, + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "text", + "content": " already possesses useful features for vision-language modeling. In this section, we lift these features through alignment tuning to construct a new encoder, " + }, + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 66, + 84, + 541, + 145 + ], + "type": "text", + "content": ", specialized for multimodal large language models (MLLMs). Our principle is to design not only the most performant, but also the most general vision encoder for use in MLLM development. To this end, we want a single language-aligned encoder that performs well across language models, across input resolutions, and for a wide variety of MLLM tasks." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 154, + 543, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 154, + 543, + 228 + ], + "spans": [ + { + "bbox": [ + 66, + 154, + 543, + 228 + ], + "type": "text", + "content": "MLLM Evaluation Tasks. In this section, our main testbed is to adapt vision encoders to MLLMs and test on various MLLM tasks. We evaluate the downstream performance of each MLLM across five task categories: (1) OCR, Chart, Document Q&A on ChartQA [165], DocVQA [91], InfoVQA [92] and AI2D [57]; (2) Visual Q&A on TextVQA [125], OK-VQA [118], POPE [73], and VQAv2 [40]; (3) Captioning on Flicker [157], COCO [76], and No Cap [1]; (4) Video Understanding on VideoMME [38], STAR [148], TGIF-QA [53], EgoSchema [89], MVBenchmark [68], and PerceptionTest [105]; and finally (5) Grounding on RefCOCO [56]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 236, + 241, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 236, + 241, + 249 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 241, + 249 + ], + "type": "text", + "content": "4.1 Language Alignment Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "spans": [ + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "text", + "content": "We begin by searching for the optimal language alignment method. We design our alignment tuning based on the midtraining stage of Perception Language Model (PLM) [21], which is to adapt " + }, + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "text", + "content": " to a pretrained decoder-only LLM (Llama 3 [82]) connected by a vision projector. 
We start with \"warmup\" training stage with autoregressive next-token prediction loss on 1M image-text samples from pretraining, where everything but the projector is frozen. Then, we proceed to finetune all parameters on 70M data samples [21] covering natural images, documents/charts/diagrams, and videos, using the same next-token prediction loss. After completing this language alignment, we extract the vision encoder from the model and refer to it as " + }, + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 66, + 255, + 541, + 340 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 345, + 348, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 345, + 348, + 417 + ], + "spans": [ + { + "bbox": [ + 67, + 345, + 348, + 417 + ], + "type": "text", + "content": "To arrive at the optimal training configuration presented in PLM [21], we first conduct ablation studies using a 20M subset of the data. In Tab. 9, we ablate the LLM sizes, training parameters, vision projector types, output layers to project, and encoder regularization. We evaluate across OCR Q&A, Captioning, Visual Q&A, and Video Q&A and find the best configuration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 422, + 347, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 422, + 347, + 471 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 347, + 471 + ], + "type": "text", + "content": "LLM Setup. We explore different scales (1B or 3B parameters) and freezing weights of the LLM. We observe that going from 1B to 3B parameters increases average score by 1.6 points " + }, + { + "bbox": [ + 67, + 422, + 347, + 471 + ], + "type": "inline_equation", + "content": "(76.5\\rightarrow 78.1)" + }, + { + "bbox": [ + 67, + 422, + 347, + 471 + ], + "type": "text", + "content": ". 
Unfreezing the LLM boosts this number to 78.4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 476, + 347, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 476, + 347, + 512 + ], + "spans": [ + { + "bbox": [ + 67, + 476, + 347, + 512 + ], + "type": "text", + "content": "Vision Projector. Using a 2-layer MLP vision projector instead of a linear layer improves the average score from 77.2 to 78.1, while only adding few parameters (13.5M → 27M)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 518, + 347, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 347, + 553 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 347, + 553 + ], + "type": "text", + "content": "PE Output Layer. As shown in §3, " + }, + { + "bbox": [ + 67, + 518, + 347, + 553 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 518, + 347, + 553 + ], + "type": "text", + "content": " has intermediate layers that perform significantly better than the last layer when used as features for certain tasks. However, it is not clear if that" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 363, + 350, + 545, + 510 + ], + "blocks": [ + { + "bbox": [ + 363, + 350, + 545, + 510 + ], + "lines": [ + { + "bbox": [ + 363, + 350, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 363, + 350, + 545, + 510 + ], + "type": "table", + "html": "
LLM scaleLLM unfrozen Regularization?ProjectorLayerAvg.OCR Q&A Average of 4
Average of 3Captioning Average of 3
LLM Setup
1BMLP4776.560.7115.176.054.0
3BMLP4778.165.9115.776.654.1
3BMLP4778.465.8117.676.353.7
Vision Projector
3BLinear4777.264.5114.176.553.7
3BMLP4778.165.9115.776.654.1
PE Output Layer
3BMLP5075.956.6116.776.553.7
3BMLP4778.165.9115.776.654.1
3BMLP4176.965.5112.875.453.9
PE Regularization
3BMLP4779.969.0117.577.455.6
3BMLP4780.168.7118.377.056.3
", + "image_path": "6aaf571ddc34b68dd60e42fa52c459e5fa0be4d384dfe35f17bc16668a48d9aa.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 361, + 512, + 542, + 545 + ], + "lines": [ + { + "bbox": [ + 361, + 512, + 542, + 545 + ], + "spans": [ + { + "bbox": [ + 361, + 512, + 542, + 545 + ], + "type": "text", + "content": "Table 9 Language Alignment. We find the best configuration to language align " + }, + { + "bbox": [ + 361, + 512, + 542, + 545 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 361, + 512, + 542, + 545 + ], + "type": "text", + "content": " using autoregressive language training." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 554, + 542, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 554, + 542, + 578 + ], + "spans": [ + { + "bbox": [ + 67, + 554, + 542, + 578 + ], + "type": "text", + "content": "same behavior applies when finetuning. We test applying the projector to layers 41, 47, and 50 (the last layer), and find that layer 47 works best. Incidentally, this is also the optimal layer for frozen VQ&A in Fig. 8." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 584, + 543, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 584, + 543, + 620 + ], + "spans": [ + { + "bbox": [ + 66, + 584, + 543, + 620 + ], + "type": "text", + "content": "PE Regularization. We apply LayerScale [135] and DropPath [50] to the vision encoder during the alignment, for stabilizing training. This improves the 78.1 average score to 79.9 (+1.8 points). Unfreezing the LLM boosts this number further to 80.1. We choose this configuration (last row) as our final alignment setup." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "spans": [ + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "text", + "content": "To construct " + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "text", + "content": ", we scale this recipe up the 70M samples mentioned above (more details in [21]). In summary, we use a pretrained Llama3.2 3B, unfrozen, with a 2-layer MLP as a vision projector on top of layer " + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "text", + "content": " layer 47 (with the last 3 discarded) and regularize the encoder with LayerScale and DropPath. Compared to the 20M sample ablation setting in Tab. 9, the final " + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 66, + 625, + 543, + 687 + ], + "type": "text", + "content": " trained on 70M total samples gives another +2.1 points to 82.2 on the average across OCR Q&A, Captioning, Visual Q&A, and Video Q&A." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 66, + 695, + 543, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 695, + 543, + 720 + ], + "spans": [ + { + "bbox": [ + 66, + 695, + 543, + 720 + ], + "type": "text", + "content": "Effects. 
The goal of alignment tuning is to lift the strong features found in intermediate layers of " + }, + { + "bbox": [ + 66, + 695, + 543, + 720 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 695, + 543, + 720 + ], + "type": "text", + "content": " described in §3 to the end of the network. To see if we actually accomplished that, we perform the same layerwise" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": "analysis as in Fig. 8 on our final " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " model and compare it to the original " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " checkpoint it was initialized from. We present the results of this analysis in Fig. 13, and immediately we see that language alignment was a success: across all categories, the performing layer for the aligned model was the last, no matter the performance of the original checkpoint. 
Notably, our " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " training mix did not contain grounding data, which means that this significantly lifted grounding performance is entirely due to the strong intermediate grounding features in " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " now being aligned to the end of the network. Moreover, specific domains such as OCR Q&A that were represented in the training mix see a significant boost to performance compared to even the best layer of " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": ", which was already strong. Thus, with an order of magnitude fewer samples compared to pretraining, we were able to language align " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " to create a single, strong encoder for all visual language modeling tasks. Following this success, we align " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " in a similar manner to construct " + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}" + }, + { + "bbox": [ + 67, + 64, + 312, + 304 + ], + "type": "text", + "content": " (see [21])." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 334, + 68, + 438, + 157 + ], + "blocks": [ + { + "bbox": [ + 334, + 68, + 438, + 157 + ], + "lines": [ + { + "bbox": [ + 334, + 68, + 438, + 157 + ], + "spans": [ + { + "bbox": [ + 334, + 68, + 438, + 157 + ], + "type": "image", + "image_path": "c532d458f803584390cf5e69b8ff8dfe0debb484e426f8af53a4c2f42efbf43c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 440, + 68, + 541, + 156 + ], + "blocks": [ + { + "bbox": [ + 440, + 68, + 541, + 156 + ], + "lines": [ + { + "bbox": [ + 440, + 68, + 541, + 156 + ], + "spans": [ + { + "bbox": [ + 440, + 68, + 541, + 156 + ], + "type": "image", + "image_path": "14b60cb5564f062b0f5bb2840805e4b8a21d292381a916d262d40bd14b58afec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 335, + 160, + 436, + 249 + ], + "blocks": [ + { + "bbox": [ + 335, + 160, + 436, + 249 + ], + "lines": [ + { + "bbox": [ + 335, + 160, + 436, + 249 + ], + "spans": [ + { + "bbox": [ + 335, + 160, + 436, + 249 + ], + "type": "image", + "image_path": "daad2d2e328b97efc76360b8bc401940c602a89b5d2c0a1b5764d1fc9b3b3d3e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "lines": [ + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "spans": [ + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "type": "text", + "content": "Figure 13 Language Alignment. We analyze how language alignment changes the internal features of PE. Similar to our " + }, + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "type": "text", + "content": " analysis in Fig. 
12, we extract " + }, + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 326, + 259, + 543, + 304 + ], + "type": "text", + "content": " and adapt each layer to a new LLM." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 441, + 159, + 542, + 249 + ], + "blocks": [ + { + "bbox": [ + 441, + 159, + 542, + 249 + ], + "lines": [ + { + "bbox": [ + 441, + 159, + 542, + 249 + ], + "spans": [ + { + "bbox": [ + 441, + 159, + 542, + 249 + ], + "type": "image", + "image_path": "78f9649d19944548bbb57462f9d14c6aafc7fcd268b77b96d9d92ea589b00fab.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 318, + 312, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 318, + 312, + 331 + ], + "spans": [ + { + "bbox": [ + 67, + 318, + 312, + 331 + ], + "type": "text", + "content": "4.2 Comparisons with Existing Vision Encoders" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "spans": [ + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "text", + "content": "We compare " + }, + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 337, + 543, + 399 + ], + "type": "text", + "content": " with other vision encoders that are popular choices in MLLM literature: MetaCLIP [152], SigLIP2 [138], CLIP [106], AIMv2 [37], DINOv2 [98], and InternViT2.5 [18]. 
Overall, these encoders span several different pretraining losses (e.g., contrastive, captioning, self-supervised, and mixed supervision), encoder sizes (from 300M to 6B parameters), and resolutions (from 224 to 512). For all vision encoders, we find the best intermediate layers to train MLLM for fair comparison (more in Appendix B.2)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 407, + 543, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 543, + 444 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 543, + 444 + ], + "type": "text", + "content": "MLLM Benchmarking Setup. We connect each vision encoder, including " + }, + { + "bbox": [ + 67, + 407, + 543, + 444 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 407, + 543, + 444 + ], + "type": "text", + "content": ", to a language decoder with a fresh 2-layer MLP projector. Similar to the alignment stage, we first train only the projector on a subset of 1M image-text pairs from pretraining. Then, we train both the projector and LLM on 2.6M visual Q&A pairs," + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 66, + 450, + 547, + 663 + ], + "blocks": [ + { + "bbox": [ + 66, + 450, + 547, + 663 + ], + "lines": [ + { + "bbox": [ + 66, + 450, + 547, + 663 + ], + "spans": [ + { + "bbox": [ + 66, + 450, + 547, + 663 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [11]Avg. Ground RefLOCOg+ [56]Avg. VideoVideoMME Acc. [38]STAR Acc. [148]TGF-IQA Acc. [53]EgoScheme Acc. [89]MV-Bench Acc. [68]PerceptionTest Acc. [105]
CharQA Acc. [165]DocVQA Acc. [91]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1444.947.933.028.770.268.447.662.586.976.5110.587.5130.0114.160.653.946.151.066.458.649.451.9
MetaCLIP-G [152]1.8B224/1444.847.633.127.970.668.848.263.586.576.9111.186.5132.1114.860.553.145.050.766.456.048.751.9
PElang G†1.7B*224/1453.761.347.132.274.171.855.165.386.879.8116.491.0136.9121.265.755.547.355.768.959.648.652.9
576 Tokens per Image
CLIP [106]0.3B336/1453.561.749.532.870.172.760.763.987.378.9113.392.0132.9115.065.054.246.352.168.657.448.552.3
AIMv2-L [37]0.3B336/1453.361.648.032.171.473.762.764.387.780.1115.290.9135.6119.263.352.544.350.967.554.444.953.2
AIMv2 L Dist. [37]0.3B336/1453.761.149.431.572.774.162.864.888.380.3117.894.7137.5121.262.653.844.352.465.057.450.053.6
SigLIP2-so [138]0.4B384/1658.969.058.335.273.176.869.867.288.781.6116.592.1137.7119.867.454.545.553.167.257.649.354.5
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1466.976.873.641.176.176.268.566.089.181.3119.796.1139.6123.468.958.148.758.970.561.852.755.9
1024 Tokens per Image
InternViT 2.5 L [18]0.3B448/1460.674.159.235.973.174.265.464.487.679.6112.388.4133.7114.966.950.645.244.862.754.246.050.5
SigLIP2-so [138]0.4B512/1663.372.169.339.072.777.974.866.089.081.8117.493.5138.3120.269.655.846.255.467.062.050.054.5
PEcore L0.3B448/1459.468.762.536.669.774.767.764.388.378.7112.789.6133.4114.959.750.941.751.261.652.647.450.6
PElang L0.3B448/1471.181.081.946.475.077.173.065.589.380.8117.394.3137.3120.170.556.547.057.268.059.852.354.7
DINOv2-g [98]1.1B448/1430.019.614.724.261.561.019.360.488.675.8109.486.5131.6110.164.949.539.752.160.146.847.450.8
AIMv2 3B [37]2.7B448/1448.940.553.933.967.273.064.164.085.278.9115.793.8135.2118.136.154.645.154.566.755.451.754.3
InternViT2.5-6B [18]5.5B448/1459.972.359.435.272.575.568.964.988.280.2115.092.2136.3116.368.049.644.547.062.645.848.948.5
PEcore G1.9B448/1460.869.965.436.771.173.365.960.788.478.0112.591.6133.6112.466.652.042.353.162.951.448.853.6
PElang G†1.7B*448/1472.480.584.448.376.478.175.265.490.181.8120.196.6140.0123.671.358.048.060.169.462.052.456.0
", + "image_path": "f7ec4c21f4db6833171c5782c1dfa0e8273b68efcd9efc14890eb5c453a133c9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "lines": [ + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "text", + "content": "Table 10 MLLM Results with Llama 3.18B. We compare various vision encoders at their native resolution using Llama 3.1-instruct 8B [82] as the language model. The table compares models of similar class in number of vision tokens and parameters. " + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "text", + "content": " shows strong performance across all benchmarks, including against models " + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "inline_equation", + "content": "3\\times" + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "text", + "content": " its size. " + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "inline_equation", + "content": "{}^{*}\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "text", + "content": " has 1.7B parameters since we discard the last 3 layers during language alignment. " + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 67, + 666, + 542, + 711 + ], + "type": "text", + "content": " Interpolated without extra training." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 101 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 101 + ], + "type": "text", + "content": "image captions, and image grounding samples (see Appendix B.2 for details). We benchmark at the native resolution of each encoder (with higher resolution tiling results in Appendix C.4). Finally, we ablate over two language decoders, Llama 3.1 8B [82] and QwenLM 2.5 7B [155], to measure generalization across LLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "spans": [ + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": "Results. Tab. 10 shows benchmarks results for native resolution input across existing encoders, " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": ". 
Notably, AIMv2 [37], InternViT2.5 [18], SigLIP2 [138] and " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " are trained jointly with a language decoder using next token prediction objective, and thus they perform better overall compared to the base contrastive and self-supervised models across all the metrics. However, " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " uses a fraction of the training FLOPs for language alignment tuning, while significantly outperforming all vision encoders by large margin (an average of " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "+3.5" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " points for G and " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "+2.0" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " points for L). Similarly, when tiling with 4 tiles and 1 thumbnail (see Appendix Tab. 30), both " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{L}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 110, + 543, + 220 + ], + "type": "text", + "content": " outperform all existing vision encoders, including InternViT2.5 [18], which was specifically pretrained in a tiling setting and with grounding data. 
Appendix C.4, shows a breakdown of the RefCOCO results, as well as results for tiling with higher resolution." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "text", + "content": "Transferability. As " + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "text", + "content": " is aligned with Llama 3.2-instruct 3B, we conduct a separate set of experiments to check if our model performs well with a different base LLM. In Tab. 11 we repeat the native resolution comparison with QwenLM 2.5 7B [155]. Interestingly, " + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "text", + "content": " not only outperforms all vision encoders in this setting, but it also outperforms InternViT2.5 [18], which is specifically aligned to QwenLM 2 [154] throughout midtraining. In fact, " + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 226, + 544, + 300 + ], + "type": "text", + "content": " with QwenLM even improves its performance with Llama in some cases like with OCR Q&A and video benchmarks, emphasizing the generality of our language alignment." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 66, + 305, + 548, + 463 + ], + "blocks": [ + { + "bbox": [ + 66, + 305, + 548, + 463 + ], + "lines": [ + { + "bbox": [ + 66, + 305, + 548, + 463 + ], + "spans": [ + { + "bbox": [ + 66, + 305, + 548, + 463 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolution Batch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCRQAAvg. VQAAvg. Cap.Avg. Ground, ReCOC%+ [56]
CharQATextVQAFlicker CIDEr [157]Avg. Ground, ReCOC%+ [56]
Acc. [165]Acc. [125]COCO CIDEr [76]STAR Acc. [148]
DocVQADocVQANo Cap CIDEr [1]EGoSema Acc. [89]
Acc. [91]Acc. [92]Avg. Ground, ReCOC%+ [56]VideoOME Mec Aoc. [38]
Aoc. [57]Aoc. [73]Avg. VideoStAR Acc. [68]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1660.572.059.136.774.366.269.065.489.281.1116.391.6137.3120.070.057.051.355.866.061.051.955.7
SigLIP2-g-opt [138]1.1B384/1660.871.060.436.775.276.870.365.689.581.8118.896.4139.0121.169.958.352.057.668.162.052.857.4
PElang G†1.7B*336/1466.877.572.441.176.476.067.965.489.181.5118.894.6139.5122.370.160.254.661.769.863.654.357.2
1024 Tokens per Image
InternViT2.5 [18]0.3B448/1460.375.461.136.268.474.265.663.787.879.5112.188.5133.5114.168.155.850.354.766.659.050.653.8
SigLIP2-so [138]0.4B512/1666.377.271.942.473.977.974.265.689.981.8117.193.0138.0120.370.555.950.357.367.262.650.347.4
PEcore L0.3B448/1463.573.967.440.572.275.769.264.089.480.2113.388.7135.2115.966.557.349.657.867.760.852.355.5
PElang L0.3B448/1470.280.680.746.073.576.872.864.189.481.0116.493.4137.6118.170.458.351.659.867.462.253.455.4
DINOv2 [98]1.1B448/1431.321.714.724.664.361.018.959.588.976.9110.187.3132.1110.869.354.346.956.563.456.849.752.2
AIMv2 3B [37]2.7B448/1466.076.770.541.475.277.974.266.289.481.9119.296.4139.2122.067.656.345.958.067.860.851.453.9
InternViT2.5 [18]5.5B448/1464.278.265.339.673.676.470.164.589.381.7117.695.9138.4118.672.856.150.359.167.356.651.152.2
PEcore G1.9B448/1464.875.968.841.672.975.267.962.489.780.7113.191.7135.2112.370.557.048.758.366.960.852.954.5
PElang G1.7B*448/1472.981.683.749.576.777.974.964.590.381.9118.994.6139.8122.372.160.454.162.568.366.654.256.8
", + "image_path": "d4c9c0207cfe48c928432e95dc406954c073a838ea4989599c7687b773660fe9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "text", + "content": "System-Level MLLM Comparison. In Tab. 12, we conduct a system-level comparison to the state-of-the-art open-access MLLMs: LLaVA-OneVision 7B [66], Gemma3 12B [132], Molmo-D 7B [25], Qwen2 VL 7B [144], InternVL 2.5 8B [18] and the very recent InternVL 3 8B [168]. Each baseline uses a contrastively pretrained ViT (SigLIP-so400M [160], CLIP-L [106], DFN-H [33], and InternViT 2.5 300M [18]). For our PLM-8B we use " + }, + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "text", + "content": " as the vision encoder with 36 tiles for images and 32 frames for video and Llama 3.1-instruct 8B as the language decoder (more details in [21]). We show numbers from their respective works or evaluate them ourselves if they are not reported (except for Gemma and InternVL 3). PLM-8B outperforms all other models tested, emphasizing that " + }, + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 500, + 543, + 598 + ], + "type": "text", + "content": " can be used to drive strong results across a wide range of tasks." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 68, + 604, + 545, + 700 + ], + "blocks": [ + { + "bbox": [ + 67, + 465, + 543, + 489 + ], + "lines": [ + { + "bbox": [ + 67, + 465, + 543, + 489 + ], + "spans": [ + { + "bbox": [ + 67, + 465, + 543, + 489 + ], + "type": "text", + "content": "Table 11 MLLM Results with QwenLM 2.5 7B. Same setting as Tab. 10, but with QwenLM2.5 7B [155] as the language model. Although " + }, + { + "bbox": [ + 67, + 465, + 543, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 465, + 543, + 489 + ], + "type": "text", + "content": " is aligned to Llama3.2 3B, the language alignment transfers well to a different language model." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 604, + 545, + 700 + ], + "lines": [ + { + "bbox": [ + 68, + 604, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 68, + 604, + 545, + 700 + ], + "type": "table", + "html": "
ModelEncoderOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QA Acc. [165]Doc.VQA Acc. (test) [91]Info. QA Acc. (test) [92]Avg. VQA Text.VQA Acc. [125]OK-VQA Acc. [118]POPE Acc. [73]VQAV2 Acc. (val) [40]Avg. Cap. Flicker CIDEr [157]COCO CIDEr [76] No Cap CIDEr [1]Avg. Video Video.MME Acc. [38]STAR ACC. [148]TGIF-QA Acc. [53]EgoScheme (test) Acc. [89]MV.Bench Acc. [68]PerceptionTest Acc. (test) [105]
LLaVA-OV 7B [66]SigLIP-so400M81.480.086.768.890.179.977.369.689.283.579.555.770.7112.163.857.766.077.265.257.158.1
Gemma3 12B [132]SigLIP-so400M-75.787.164.9--67.7--71.6----------54.9
Qwen2 VL 7B [144]DFN-H86.683.694.576.591.780.983.667.988.383.893.779.9102.598.767.762.967.381.865.461.666.9
InternVL 2.5 8B [18]InternViT 2.5-300M87.084.693.077.692.879.979.369.290.680.6113.096.5125.8116.772.960.677.691.366.272.668.9
InternVL 3 8B [168]InternViT 2.5-300M87.286.692.776.892.6-80.2-91.1------66.3---75.4-
PLM-8BPElangG88.485.594.680.992.782.986.569.689.985.6127.4105.6146.7129.977.958.384.995.568.877.182.7
", + "image_path": "cfe4b15691ebaac0d28f672e8a216e8ab712efe6f793dc7edb7c6cbec161fe75.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 701, + 543, + 726 + ], + "lines": [ + { + "bbox": [ + 67, + 701, + 543, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 701, + 543, + 726 + ], + "type": "text", + "content": "Table 12 MLLM System-Level Comparison. We show a system-level comparison between PLM-8B based on " + }, + { + "bbox": [ + 67, + 701, + 543, + 726 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 701, + 543, + 726 + ], + "type": "text", + "content": " and popular open-access models of similar LLM scale using existing encoders. We report test set results where specified." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 328, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 328, + 79 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 328, + 79 + ], + "type": "text", + "content": "5 Perception Encoder: Spatial Alignment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "spans": [ + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "text", + "content": "While language alignment with a pretrained LLM decoder is well-established, the best way to spatially align a model is not obvious. 
As shown in §3, " + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "text", + "content": " already has features that perform well for spatial tasks. However, the layer that performs the best for higher level spatial tasks like detection or depth estimation (layer " + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "inline_equation", + "content": "\\sim 40" + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "text", + "content": ") is vastly different than the layer that performs the best for a pure spatial task like tracking (layer " + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "inline_equation", + "content": "\\sim 30" + }, + { + "bbox": [ + 66, + 89, + 544, + 186 + ], + "type": "text", + "content": "). While we were able to ignore this disparity during language alignment by aligning to an LLM decoder that could do all tasks, classical spatial tasks have decoders that come in all shapes and sizes. It would be impractical to simply align the model using all downstream decoders mirroring language alignment. Thus, we must first answer the question, what is happening in the features at those layers to make them useful for spatial tasks?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 201, + 207, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 207, + 213 + ], + "spans": [ + { + "bbox": [ + 67, + 201, + 207, + 213 + ], + "type": "text", + "content": "5.1 Core Feature Analysis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "spans": [ + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "text", + "content": "We begin by analyzing the spatial properties of the features for " + }, + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "text", + "content": " in the range of layers where it performed optimally for zero-shot tracking in §3. In Fig. 14, we plot (1) the pairwise feature cosine similarity between the pink token and all others, (2) the head average attention map for that token, and (3) the full attention matrix " + }, + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "inline_equation", + "content": "(HW\\times HW)" + }, + { + "bbox": [ + 67, + 220, + 210, + 353 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 361, + 210, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 210, + 408 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 210, + 408 + ], + "type": "text", + "content": "An 18 Layer Decoder. 
Remarkably, the cause for the tracking performance peak at layer 32 is abundantly clear from observing" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 235, + 204, + 543, + 361 + ], + "blocks": [ + { + "bbox": [ + 235, + 204, + 543, + 361 + ], + "lines": [ + { + "bbox": [ + 235, + 204, + 543, + 361 + ], + "spans": [ + { + "bbox": [ + 235, + 204, + 543, + 361 + ], + "type": "image", + "image_path": "a542d95f67d6e1d95a991683d76f091ce129ea008cea4253e777d56226a43c1e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "lines": [ + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "spans": [ + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "type": "text", + "content": "Figure 14 " + }, + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{core}}\\mathsf{G}" + }, + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "type": "text", + "content": " Feature Analysis. To understand the dichotomy between optimal " + }, + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 222, + 369, + 542, + 403 + ], + "type": "text", + "content": " features for spatial tasks observed in Fig. 8, we analyze the spatial properties of the features between layers 30 and 34." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 408, + 543, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 408, + 543, + 456 + ], + "spans": [ + { + "bbox": [ + 66, + 408, + 543, + 456 + ], + "type": "text", + "content": "the visualizations. Up until layer 32, the attention maps remain local. However, that changes abruptly at layer 33, at which point several tokens in the background of the image become \"global\" tokens. 
As shown by the vertical lines in the full attention matrix, starting from layer 33 every token attends to them. Thus, every layer 33 and up become part of a decoder for global information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 462, + 543, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 462, + 543, + 536 + ], + "spans": [ + { + "bbox": [ + 66, + 462, + 543, + 536 + ], + "type": "text", + "content": "This is not a new phenomenon. Recent work [23] shows this happening in all modern vision transformers above L scale. But notably these \"global tokens\" are not necessarily harmful. Given the optimal layer for most tasks in Fig. 8 lies within the global token region, the information they aggregate is useful downstream. However, tracking in §3 is zero-shot and relies purely on spatial correspondences, meaning it cannot make use of the global tokens. This explains why tracking peaks right before their introduction, while tasks that rely on semantic understanding or have larger decoders that can benefit from them do well with the later layers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 549, + 230, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 549, + 230, + 562 + ], + "spans": [ + { + "bbox": [ + 67, + 549, + 230, + 562 + ], + "type": "text", + "content": "5.2 Spatial Alignment Method" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 66, + 568, + 543, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 568, + 543, + 641 + ], + "spans": [ + { + "bbox": [ + 66, + 568, + 543, + 641 + ], + "type": "text", + "content": "Given the analysis in §5.1, we have two objectives in creating a spatial alignment method: (1) we must preserve the optimal semantic information of the model (including the global tokens) that peaks around layer 40, and (2) we must do so while emphasizing local alignment in service of spatial tasks with shallow decoders. 
The first can be easily achieved by aligning with the model's own features (e.g., with MaskFeat [147]), but the second is more challenging. To accomplish this, we employ the Segment Anything Model (SAM) 2.1 [111] in a novel way to enforce spatial correspondence information in PE." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "spans": [ + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "text", + "content": "Retaining Semantics. To retain the strong semantic features from " + }, + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "text", + "content": ", we finetune the model with itself as a teacher. Specifically, we train the model to minimize the cosine similarity between its last layer and the frozen layer 41 features of its initialization (a layer around the peak for many tasks in Fig. 8). On its own this would be a tautology, so we apply heavy regularization to the student: DropPath [50] and LayerScale [135] similar to language alignment, as well as performing MaskFeat [147] with " + }, + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 66, + 650, + 543, + 711 + ], + "type": "text", + "content": " masking. 
We keep the teacher" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 312, + 752 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "type": "text", + "content": "fixed in contrast to other state-of-the-art spatial models, which all employ an EMA teacher [98, 138]. This could potentially help, but we opt for simplicity." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 98, + 300, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 300, + 241 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 300, + 241 + ], + "type": "text", + "content": "Encouraging Locality. While we could \"retain\" locality by self-distilling from layer 32 features, that may be less effective as we are already distilling another layer of the model. Instead, we turn to a model that is explicitly tuned for locality: SAM [58, 111]. Notably, several works [110, 116, 119] have shown SAM to not be an effective teacher when distilling from multiple sources (though recently [45] has shown it can help with some tricks). However, upon observation of the raw features of SAM 2.1-L (Fig. 15), the main problem may be the same one we are currently trying to solve: SAM has global tokens as well! 
In this case," + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 322, + 91, + 543, + 194 + ], + "blocks": [ + { + "bbox": [ + 322, + 91, + 543, + 194 + ], + "lines": [ + { + "bbox": [ + 322, + 91, + 543, + 194 + ], + "spans": [ + { + "bbox": [ + 322, + 91, + 543, + 194 + ], + "type": "image", + "image_path": "d041c7193a207d97bff0767ce452ad71d55bf14f0d2698e25d892c6237ddce26.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 203, + 542, + 237 + ], + "lines": [ + { + "bbox": [ + 311, + 203, + 542, + 237 + ], + "spans": [ + { + "bbox": [ + 311, + 203, + 542, + 237 + ], + "type": "text", + "content": "Figure 15 SAM 2.1 Feature Similarity. The cosine similarity between the pink marked token and all others for SAM 2.1-L [111] features vs. our proposed mask logit features." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 241, + 492, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 241, + 492, + 253 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 492, + 253 + ], + "type": "text", + "content": "they appear as dark spots in a grid-like arrangement across all examples in Fig. 15 raw features." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "text", + "content": "Using the features of a model that itself has global tokens to mitigate the effect of global tokens is dubious at best. But, we don't have to use SAM's features to learn locality. At its core, SAM is a model that transforms points into spatially contiguous masks of select object. If what we want is smooth, locally consistent features, we can use the mask predictions themselves. 
Specifically, we query SAM 2.1-L with 1024 points arranged in a " + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "text", + "content": " grid. For each point, SAM returns a " + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "text", + "content": " mask logit the size of the image, which it normally would threshold and NMS. However, we instead concatenate those logits into a " + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "inline_equation", + "content": "H \\times W \\times 1024" + }, + { + "bbox": [ + 67, + 258, + 543, + 355 + ], + "type": "text", + "content": " tensor and use that as the feature map for alignment. This explicitly produces locally well-aligned features compared to the underlying feature space and has no spatial artifacts caused by global tokens, as shown in Fig. 15." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "text", + "content": "Then to align, we distill the spatial correspondences between tokens by computing their pairwise cosine similarity for both the student and the teacher (creating a " + }, + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "inline_equation", + "content": "HW \\times HW" + }, + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "text", + "content": " matrix for each) and aligning them with MSE loss. 
Unlike SAM's underlying feature space (which [45] shows may be brittle to interpolation), the mask logit features are robust to interpolation, so we simply interpolate them down and train at the " + }, + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 361, + 543, + 433 + ], + "type": "text", + "content": " model's original 448px resolution. Finally, like for self-distillation we add the same masking and regularization. For both teachers, we apply loss to all tokens and add no extra parameters other than LayerScale." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "text", + "content": "Effects. Again, the goal of alignment is to lift the strong features already learned by the core model as shown in §3. Thus, like we did for language alignment in §4.1, we perform layerwise frozen feature analysis on spatial tasks in Fig. 16. This time, we evaluate the original " + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "text", + "content": " checkpoint as well " + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "text", + "content": " aligned to its own layer 41, to SAM 2.1 mask logits, and finally both. We denote aligning to both as " + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 441, + 298, + 538 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "text", + "content": "Aligning purely based on the original model's layer 41 features performs well on detection, depth, and semantic segmentation, but falls short for zero-shot tracking, where precise locality is necessary to define boundaries between objects. In contrast, aligning to SAM 2.1 mask logits lowers last layer performance on every task except for tracking, where it significantly improves performance. Understandably, this is because the mask logits have little semantics (see Fig. 17). Thus, the optimal approach is to combine both teachers. As a result, " + }, + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "text", + "content": " not only lifts the features for all tasks to the end of the network, but it also improves over self-alignment alone. Notably, " + }, + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 544, + 299, + 711 + ], + "type": "text", + "content": " s tracking performance is lower than" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 711, + 539, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 711, + 539, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 711, + 539, + 723 + ], + "type": "text", + "content": "the SAM-aligned model, but it is still ahead of other methods while being a generally good model, see §5.3." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 315, + 438, + 425, + 533 + ], + "blocks": [ + { + "bbox": [ + 315, + 438, + 425, + 533 + ], + "lines": [ + { + "bbox": [ + 315, + 438, + 425, + 533 + ], + "spans": [ + { + "bbox": [ + 315, + 438, + 425, + 533 + ], + "type": "image", + "image_path": "b8d97850bf3742315f6fb8c066d8dda1568ae7083e36a3556ca7fe5042281f80.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 428, + 438, + 536, + 533 + ], + "blocks": [ + { + "bbox": [ + 428, + 438, + 536, + 533 + ], + "lines": [ + { + "bbox": [ + 428, + 438, + 536, + 533 + ], + "spans": [ + { + "bbox": [ + 428, + 438, + 536, + 533 + ], + "type": "image", + "image_path": "3e39ae7eaea818e7ae127909f0ae634826915332b7a34afe41e57b9b94cbbbd5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 314, + 536, + 425, + 653 + ], + "blocks": [ + { + "bbox": [ + 314, + 536, + 425, + 653 + ], + "lines": [ + { + "bbox": [ + 314, + 536, + 425, + 653 + ], + "spans": [ + { + "bbox": [ + 314, + 536, + 425, + 653 + ], + "type": "image", + "image_path": "60d858a7dc9bdf799b7f99cce57d2f46fdf7293c74a4009040792d6e25d33c2a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "lines": [ + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "spans": [ + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "type": "text", + "content": "Figure 16 Spatial Alignment. We analyze how our two spatial alignment methods individually change the internal features of " + }, + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "type": "text", + "content": ". 
Then we combine both alignment methods to create " + }, + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 312, + 659, + 542, + 704 + ], + "type": "text", + "content": " (see Appendix B.3.1)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 428, + 536, + 536, + 653 + ], + "blocks": [ + { + "bbox": [ + 428, + 536, + 536, + 653 + ], + "lines": [ + { + "bbox": [ + 428, + 536, + 536, + 653 + ], + "spans": [ + { + "bbox": [ + 428, + 536, + 536, + 653 + ], + "type": "image", + "image_path": "1a404914c26bae32a66185bf5d5c70c669e15b4a0244ddd49a1a6aed2e99497c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "content": "Last Layer Feature Visualization. In Fig. 17, we visualize the last layer features for the " + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "content": " and the 3 aligned models, with similar colors denoting similar features. 
In the first column, we see why the last layer performance of " + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "content": " is so poor: while the last layer features contain information about the salient objects, they seem to have lost spatial coherence. Aligning to the model's own layer 41 features fixes this, but its spatial quality is lacking. In contrast, the model aligned to SAM 2.1 mask logits has locally clear features, but without semantics (similar objects have dissimilar features, see row 1 cats and row 2 cows). " + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "content": " using both teachers at once, retains the semantics of " + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 290, + 244 + ], + "type": "text", + "content": " while producing high quality spatial features." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 308, + 55, + 535, + 208 + ], + "blocks": [ + { + "bbox": [ + 308, + 55, + 535, + 208 + ], + "lines": [ + { + "bbox": [ + 308, + 55, + 535, + 208 + ], + "spans": [ + { + "bbox": [ + 308, + 55, + 535, + 208 + ], + "type": "image", + "image_path": "e230c7cf4ab7709671d53765702b74a3a0690485de5e999521b719874d64f7c2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 214, + 542, + 248 + ], + "lines": [ + { + "bbox": [ + 302, + 214, + 542, + 248 + ], + "spans": [ + { + "bbox": [ + 302, + 214, + 542, + 248 + ], + "type": "text", + "content": "Figure 17 Last Layer Visualization for the models in Fig. 
16 using 3 dimensional PCA to map features to LCh color space (see Appendix B.3.2). More examples in Appendix C.5." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 70, + 262, + 325, + 363 + ], + "blocks": [ + { + "bbox": [ + 70, + 262, + 325, + 363 + ], + "lines": [ + { + "bbox": [ + 70, + 262, + 325, + 363 + ], + "spans": [ + { + "bbox": [ + 70, + 262, + 325, + 363 + ], + "type": "table", + "html": "
EncoderParamsResolutionTrackingSegmentationDepth
DAVIS (↑) [104]ADE20k (↑) [167]NYU (↓) [123]
BestLastIdxBestLastIdxBestLastIdx
OAI CLIP-L [106]0.3B224/1439.437.117/2439.438.319/24.366.39719/24
AIMv2-3B [37]2.7B448/1454.729.313/2441.631.920/24.311.32616/24
SigLIP-so [160]0.4B384/1448.736.316/2740.138.322/27.339.36921/27
SigLIP2-so [138]0.4B512/1651.445.315/2744.042.924/27.306.32925/27
SigLIP2-g-opt [138]1.1B384/1643.538.832/4042.141.334/40.302.32434/40
DINOv2-L [98]0.3B448/1458.758.223/2447.347.324/24.297.30823/24
DINOv2-g [98]1.1B448/1458.558.540/4048.748.437/40.279.29027/40
PEcoreG1.9B448/1456.842.832/5041.538.644/50.249.30939/50
PEspatialG1.9B448/1461.561.550/5049.348.949/50.262.27546/50
", + "image_path": "a5560ec88cc71a74991ee5cc6e041c018b9aaa38f6f3e9e3aff01f61f4f5de3c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 337, + 262, + 538, + 371 + ], + "blocks": [ + { + "bbox": [ + 67, + 366, + 331, + 410 + ], + "lines": [ + { + "bbox": [ + 67, + 366, + 331, + 410 + ], + "spans": [ + { + "bbox": [ + 67, + 366, + 331, + 410 + ], + "type": "text", + "content": "Table 13 Frozen Feature Dense Prediction including zero-shot tracking, semantic segmentation and depth estimation. We report best and last layer performance, along with which layer was best for each model. See Appendix B.3.3 for experimental settings." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 337, + 262, + 538, + 371 + ], + "lines": [ + { + "bbox": [ + 337, + 262, + 538, + 371 + ], + "spans": [ + { + "bbox": [ + 337, + 262, + 538, + 371 + ], + "type": "table", + "html": "
EncoderParamsPretrain ResolutionLVIS [41]COCO [76]
APboxAPmaskAPboxAPmask
OAI CLIP-L [106]0.3B224/1445.041.954.047.5
MetaCLIP-G [152]1.8B224/1445.141.953.246.7
SigLIP-so [160]0.4B224/1445.041.954.447.6
MAE-L [44]0.3B224/1446.143.955.649.3
EVA02-L [35]0.3B224/1449.345.254.948.2
SigLIP2-so [138]0.4B512/1649.345.656.049.4
SigLIP2-g-opt [138]1.1B384/1652.948.557.150.2
DINOv2-L [98]0.3B518/1446.743.555.749.0
DINOv2-g [98]1.1B518/1451.547.357.250.0
PEcoreG1.9B448/1451.947.957.049.8
PEspatialG1.9B448/1454.249.357.850.3
", + "image_path": "09866ed878a98816d1da5b6fe224d8be814c643218717faa1346ddba0c7367a8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 427, + 313, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 427, + 313, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 427, + 313, + 441 + ], + "type": "text", + "content": "5.3 Comparisons with Existing Vision Encoders" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "spans": [ + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "text", + "content": "Frozen Feature Dense Prediction. In Tab. 13, we compare different vision encoder's frozen features on three dense prediction tasks: DAVIS tracking [104] (J&F) following the training-free setting from [52, 107], ADE20k semantic segmentation [167] (mIoU) linear probing, and NYU depth estimation [123] (RMSE) with a DPT head [109]. For each model, we report both its best layer and last layer performance. Across the board, " + }, + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "text", + "content": " performs outperforms other state-of-the-art spatial models, with its best features being much better aligned to the last layer than the " + }, + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 450, + 543, + 536 + ], + "type": "text", + "content": " it started from. Notably, SigLIP2, which during pretraining combines spatial, captioning, and contrastive losses [138] is not aligned well to the last layer in comparison." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "spans": [ + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "content": "End-to-End Finetuning Detection and Segmentation. In Tab. 14, we compare " + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "content": " with other popular vision encoders in the standard full-finetuning ViTDet [72] Mask-RCNN [43] setting using COCO [76] and LVIS [41] as benchmarks. In this controlled experiment, " + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "content": " is state-of-the-art among various vision backbones. This is significant, as contrastive encoders (especially large ones like MetaCLIP-G [152]) usually perform very poorly on detection, with smaller models often performing better. Typically, encoders only scale for detection if using spatial pretraining or a significant amount of detection data [98] is used to align them directly to downstream tasks. In contrast, " + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 66, + 544, + 543, + 628 + ], + "type": "text", + "content": " uses no detection data for alignment, making it general." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "text", + "content": "System-Level Detection. In Tab. 15, we provide a system-level end-to-end finetuning comparison vs. the absolute state-of-the-art in COCO detection. With only Object365 [120] as extra detection data, " + }, + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "text", + "content": " can match the performance of more complex models tuned for detection, while only using a simple DETR-style decoder [12, 99]. " + }, + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 636, + 337, + 721 + ], + "type": "text", + "content": " marks the first general, contrastively pretrained model to accomplish this." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 356, + 635, + 535, + 696 + ], + "blocks": [ + { + "bbox": [ + 337, + 375, + 539, + 407 + ], + "lines": [ + { + "bbox": [ + 337, + 375, + 539, + 407 + ], + "spans": [ + { + "bbox": [ + 337, + 375, + 539, + 407 + ], + "type": "text", + "content": "Table 14 End-to-End Finetuning Detection and Segmentation using Mask R-CNN [43] and VitDet [72] in a controlled setting. Details in Appendix B.3.4." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 356, + 635, + 535, + 696 + ], + "lines": [ + { + "bbox": [ + 356, + 635, + 535, + 696 + ], + "spans": [ + { + "bbox": [ + 356, + 635, + 535, + 696 + ], + "type": "table", + "html": "
EncoderParamsDetectorCOCO APbox
SwinV2-G [80]3.0BHTC++ [14]62.5
Swin-L [79]0.3BDINO [161]63.2
EVA02-L [35]0.3BCascade [11]64.1
InternImage-G [145]3.0BDINO [161]65.3
EVA02-L [35]0.3BCoDETR [169]65.9
PEspatialG1.9BDETA [99]66.0
", + "image_path": "8a7f45e906b49ad1978fa1578eb35d9ff55a0a97c4468b17d2db66e85fd3b4a2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 350, + 698, + 542, + 731 + ], + "lines": [ + { + "bbox": [ + 350, + 698, + 542, + 731 + ], + "spans": [ + { + "bbox": [ + 350, + 698, + 542, + 731 + ], + "type": "text", + "content": "Table 15 System-Level Comparison on Detection. Comparing to the leading results on COCO [76] val2017. See Appendix B.3.5 for training recipe." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 64, + 173, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 64, + 173, + 77 + ], + "spans": [ + { + "bbox": [ + 68, + 64, + 173, + 77 + ], + "type": "text", + "content": "6 Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 90, + 541, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 90, + 541, + 150 + ], + "spans": [ + { + "bbox": [ + 67, + 90, + 541, + 150 + ], + "type": "text", + "content": "Learning vision-semantic representations has long been the leading approach for developing foundational models in perception. By aligning visual and textual representations, these models excel not only in vision tasks such as zero-shot image classification and image-text retrieval [51, 106, 117], open-vocabulary detection [63, 94, 95] and segmentation [22, 28], but also serve as the basis for multi-modal large language models (MLLMs) [3, 5, 78, 93, 101, 134]." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 159, + 542, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 159, + 542, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 159, + 542, + 304 + ], + "type": "text", + "content": "Contrastive Language-Image Pretraining. The early works of Virtex [27], ICMLM [115], and ConViRT [163] developed the techniques for learning through contrastive objectives between vision and language modalities. Subsequently, vision encoders such as CLIP [51, 106] and ALIGN [54] scaled these techniques to much larger datasets and model sizes, popularizing vision-language contrastive learning. A series of open-weight contrastive models have been developed to enhance the performance and robustness of CLIP [33, 71, 117, 129, 152, 160]. For instance, SigLIP [160] replaces the traditional softmax with a sigmoid function in contrastive learning, while FLIP [74] employs masking techniques to expedite the training process. We are among this effort and build a state-of-the-art open Perception Encoder (PE) (§2.1). Other objectives that have proven useful for building visual encoders include captioning loss, which learns to predict image descriptions using a language model decoder and transfers well to downstream multi-modal language modeling tasks [37, 137]. Many works are now attempting to combine two or more objectives to address different downstream tasks through pretraining with multiple objectives [37, 158] or training sequentially [19, 66]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 312, + 541, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 541, + 407 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 541, + 407 + ], + "type": "text", + "content": "Efficient Training. Various axes of efficient training of clip models have been explored. 
BASIC [102] and LAION [117] explored scaling the batch size up to 160K, and shows the benefits of large batch sizes during training. EVA-CLIP [130] uses LAMB optimizer [156] for large batch training of clip models. Rotary positional embedding (RoPE) [127] has been successfully adopted in large language models. In vision transformers [2, 48] adopted 2D rotatory positional embeddings. For data engine, a series of works focus on large-scale sourcing and filtering through efficient data curation [33, 39, 117, 152] and explore recaptioning training images using MLLMs or VLMs [32, 64, 96, 151]. We extend these concepts to build a video data engine and scale our model to function as one strong model for both image and video (§2.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 418, + 542, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 418, + 542, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 418, + 542, + 597 + ], + "type": "text", + "content": "Best Embedding Layer Inside the Network. Typically, most vision encoders rely on the last layer to extract features for the task it is trained on. However, when trained on proxy or self-supervised tasks, the last layer is often not the ideal candidate for other tasks [8, 15, 16, 30, 85, 107, 121, 128, 142, 159, 166]. For example, when using image colorization as pretraining objective, [162, 166] showed that the middle layers were better at image classification compared to last layers. Subsequently, in iGPT [15], when trained for next token prediction, intermediate layers performed better at image classification. AIMv1 [30] also showed similar behavior for image based next token prediction with patch normalized MSE loss. Toto [107] showed this can be extended for next token prediction in videos, and intermediate layers are best for image classification, video classification, tracking and robotics. 
REPA [159] showed this behavior for image generation models, where the intermediate layers of SiT [85] has better linear probing accuracy compared to earlier or later layers. In CLIP models, CLIPer [128] identified that early layers in CLIP possess good spatial understanding. In contrast to these lines of work, in this paper, we first show this behavior is not limited to one class of encoders. Specifically, we show this behavior exists in a spatially self-supervised model [98], generative captioning model [37], and also in our own PE. Then we study this behavior for PE encoder in depth, and show it is possible for CLIP training to produce rich spatial and semantic features in intermediate layers (§3)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "text", + "content": "Alignment Tuning. We explore alignment tuning for language (§4) and for spatial understanding (§5). For language alignment, we focus on adapting to multimodal large language models (MLLMs); for spatial alignment, we employ self-distillation of the models own features combined with a teacher for locality. In MLLM literature, midtraining—i.e., a middle stage of training used to exploit large-scale multimodal data—has been actively studied. LLaVA-OneVision [66], InternVL series [18, 19], QwenVL series [3, 144], and several other leading MLLMs [82, 132] adopt this paradigm. Our " + }, + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "text", + "content": " can be seen as a variant of midtraining, but with one critical difference in principle: our goal is not to build the best MLLM, but to make the vision encoder the most general. 
Throughout §4, we benchmark our " + }, + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 606, + 541, + 715 + ], + "type": "text", + "content": " across different language models, input resolution, on various tasks for image and video to show this generality. For spatial tasks, we utilize the hidden embeddings" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "text", + "content": "in the intermediate layers. Recently, several works showed the effectiveness of distilling teacher model via representation alignment with cosine similarity. REPA [159] distilled an early layer features of DINO for image diffusion models, RADIO [110] used multi-teacher distillation (DINO, CLIP and SAM). The key idea is to borrow semantic understanding (e.g., CLIP) and spatial understanding (e.g., SAM, DINO) of a pretrained vision encoders. 
In our " + }, + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "text", + "content": ", we exploit the intermediate features of " + }, + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 64, + 543, + 138 + ], + "type": "text", + "content": " for semantics, and a novel way to use SAM for spatial understanding." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 154, + 160, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 154, + 160, + 168 + ], + "spans": [ + { + "bbox": [ + 67, + 154, + 160, + 168 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "spans": [ + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": "We have presented Perception Encoders (PE), a family of best-in-class foundation models comprising " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": ". 
We have shown that " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": " can outperform models trained with WebLI and JFT-3B, which were previously the undisputed leaders in zero-shot image recognition, while also excelling in zero-shot video recognition. We have demonstrated that " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": " can be used to build a multimodal language model [21] that is at the forefront of the field in terms of performance. We have established that " + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 180, + 544, + 279 + ], + "type": "text", + "content": " can match the long-standing state-of-the-art in object detection with a significantly simpler decoder. Throughout all of this, one conclusion is abundantly clear: Perception Encoder unlocks the potential to scale simple contrastive vision-language pretraining to address a wide range of downstream vision tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 285, + 544, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 544, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 544, + 384 + ], + "type": "text", + "content": "Additional Contributors and Acknowledgments. 
We would like to thank Abhimanyu Dubey, Adel Ahmadyan, Andrew Westbury, Arkabandhu Chowdhury, Azita Shokrpour, Babak Damavandi, Chay Ryali, Cyprien de Lichy, Didac Suris Coll-Vinent, Dong Wang, Filip Radenovic, George Orlin, Han Zou, Harry Tran, Jitendra Malik, Joelle Pineau, Joseph Greer, Kavya Srinet, Kirmani Ahmed, Laura Gustafson, Lu Zhang, Muhammad Maaz, Natalia Neverova, Nicolas Carion, Oleksandr Maksymets, Ramya Raghavendra, Romy Luo, Ronghang Hu, Sam Doud, Sasha Mitts, Sean Bell, Shane Moon, Shuming Hu, Soerian Lieve, Stephane Kasriel, Valentin Gabeur, Vanessa Stark, Vignesh Ramanathan, Vivian Lee, Xuan Hu, Yang Li, and Ziyang Wang for their contributions and support for the project. And we thank you, the reader, for reading this far." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 204, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 204, + 79 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 204, + 79 + ], + "type": "text", + "content": "A Video Data Engine" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 168, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 168, + 105 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 168, + 105 + ], + "type": "text", + "content": "A.1 Video Caption" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 114, + 189, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 114, + 189, + 126 + ], + "spans": [ + { + "bbox": [ + 67, + 114, + 189, + 126 + ], + "type": "text", + "content": "LLM Summarization 
prompt" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 135, + 212, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 135, + 212, + 144 + ], + "spans": [ + { + "bbox": [ + 72, + 135, + 212, + 144 + ], + "type": "text", + "content": "LLM Summarization prompt 72 tokens" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 147, + 469, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 147, + 469, + 156 + ], + "spans": [ + { + "bbox": [ + 72, + 147, + 469, + 156 + ], + "type": "text", + "content": "Create a concise caption of a video using the provided metadata, video caption, and frame captions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 157, + 536, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 157, + 536, + 175 + ], + "spans": [ + { + "bbox": [ + 74, + 157, + 536, + 175 + ], + "type": "text", + "content": "TASK: Extract key information from the captions and combine it into an alt text format using single phrase or set of phrases that includes all relevant details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 175, + 139, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 175, + 139, + 185 + ], + "spans": [ + { + "bbox": [ + 74, + 175, + 139, + 185 + ], + "type": "text", + "content": "Steps to Follow:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 185, + 536, + 259 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 74, + 185, + 536, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 185, + 536, + 204 + ], + "spans": [ + { + "bbox": [ + 74, + 185, + 536, + 204 + ], + "type": "text", + "content": "1. Review the metadata (title and description) for general context, you can rely it for entity names but do not rely on it as the primary source of information for your caption." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 74, + 205, + 432, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 205, + 432, + 213 + ], + "spans": [ + { + "bbox": [ + 74, + 205, + 432, + 213 + ], + "type": "text", + "content": "2 . Blend title / description with video caption and frame captions for the main storyline" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 74, + 214, + 287, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 214, + 287, + 222 + ], + "spans": [ + { + "bbox": [ + 74, + 214, + 287, + 222 + ], + "type": "text", + "content": "3. Extract the most relevant and concise information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 223, + 536, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 223, + 536, + 241 + ], + "spans": [ + { + "bbox": [ + 74, + 223, + 536, + 241 + ], + "type": "text", + "content": "4. Combine extracted information into a alt text format using short phrase or set of phrases with approximately 120 tokens, considering special characters like comma as part of the token count." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 242, + 391, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 242, + 391, + 251 + ], + "spans": [ + { + "bbox": [ + 74, + 242, + 391, + 251 + ], + "type": "text", + "content": "5. Prioritize including all key information over sentence structure or grammar." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 74, + 251, + 359, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 251, + 359, + 259 + ], + "spans": [ + { + "bbox": [ + 74, + 251, + 359, + 259 + ], + "type": "text", + "content": "6. Minimize the use of special characters and focus of key information." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 74, + 261, + 132, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 261, + 132, + 269 + ], + "spans": [ + { + "bbox": [ + 74, + 261, + 132, + 269 + ], + "type": "text", + "content": "What to Avoid:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 74, + 270, + 432, + 289 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 74, + 270, + 432, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 270, + 432, + 279 + ], + "spans": [ + { + "bbox": [ + 74, + 270, + 432, + 279 + ], + "type": "text", + "content": "- Avoid adding or inferring information not present in the original metadata and captions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 74, + 280, + 360, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 280, + 360, + 289 + ], + "spans": [ + { + "bbox": [ + 74, + 280, + 360, + 289 + ], + "type": "text", + "content": "- Avoid using complex sentence structures or prioritizing sentence flow." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 74, + 290, + 454, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 290, + 454, + 298 + ], + "spans": [ + { + "bbox": [ + 74, + 290, + 454, + 298 + ], + "type": "text", + "content": "Create a concise caption of the video based on the metadata, video caption, and frame captions." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 316, + 224, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 316, + 224, + 327 + ], + "spans": [ + { + "bbox": [ + 67, + 316, + 224, + 327 + ], + "type": "text", + "content": "A.2 PE Video Dataset Details" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 67, + 335, + 542, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 335, + 542, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 335, + 542, + 370 + ], + "type": "text", + "content": "PE Video is a dataset that we collected and curated from a licensed data source. The videos are high-resolution and high-quality with a focus on motion. The total number of videos is 1M. Among these, 120K videos have human-refined video captions, and we selected 15K from the 120K videos as a benchmark." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 384, + 220, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 220, + 396 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 220, + 396 + ], + "type": "text", + "content": "A.2.1 Video Data Filtering Pipeline" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 403, + 542, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 403, + 542, + 440 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 542, + 440 + ], + "type": "text", + "content": "The goal of video data filtering is to identify videos that contain motions such as object motion, camera motion, interaction between objects, human actions, sequences of actions, and manipulation of objects, while rejecting videos with static scenes, like landscapes, or those that are artificial or highly edited." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 67, + 445, + 441, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 445, + 441, + 457 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 441, + 457 + ], + "type": "text", + "content": "To achieve this, we created a video filtering pipeline consisting of the following steps:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 67, + 466, + 542, + 696 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 67, + 466, + 542, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 466, + 542, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 466, + 542, + 502 + ], + "type": "text", + "content": "Step1: Compute motion features. For each video, we compute a list of features from video frames, including frames per second (fps), number of frames, number of I-frames, motion vector magnitude, and motion vector variance, using off-the-shelf tools like OpenCV [10]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 67, + 512, + 541, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 512, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 67, + 512, + 541, + 536 + ], + "type": "text", + "content": "Step 2: Extract video frame features. For each video, we uniformly sample three frames and encode them using a DINOv2 model [98] and a SigLIP model [160]." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 544, + 542, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 542, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 542, + 582 + ], + "type": "text", + "content": "Step 3: LLM Features. For each video, we also run a multimodal large language model (LLM) like LlamaOnevision QwenLM 2 0.5B [66] to extract MLLM features. We composed a list of 26 questions and performed MLLM inference on the videos. The questions can be found here in §A.2.2." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 67, + 590, + 542, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 542, + 663 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 542, + 663 + ], + "type": "text", + "content": "Step 4: Video Quality Scoring. We combine all the features collected so far and use a random forest model to predict a score between 0 and 5. To train the model, we manually annotated approximately 1,000 videos with scores between 0 and 5. A low score indicates that the video is almost static and can be nearly summarized by a single frame, while a high score indicates that there are multiple temporal events in the video, requiring several frames to accurately caption it. We use these annotated videos as training data to fit a random forest model for video quality score prediction." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 671, + 542, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 671, + 542, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 671, + 542, + 696 + ], + "type": "text", + "content": "Step 5: We apply k-means clustering to the videos and rank them within each cluster. By selecting the top-ranked videos from each cluster, we effectively reduce the number of duplicated videos in the final dataset." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 64, + 202, + 75 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 64, + 202, + 75 + ], + "spans": [ + { + "bbox": [ + 68, + 64, + 202, + 75 + ], + "type": "text", + "content": "A.2.2 LLM Feature Extraction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 86, + 220, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 86, + 220, + 95 + ], + "spans": [ + { + "bbox": [ + 73, + 86, + 220, + 95 + ], + "type": "text", + "content": "LLM Feature extraction question list" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 98, + 308, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 98, + 308, + 107 + ], + "spans": [ + { + "bbox": [ + 73, + 98, + 308, + 107 + ], + "type": "text", + "content": "Is the camera capturing the scene static? Reply yes or no." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 108, + 307, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 108, + 307, + 116 + ], + "spans": [ + { + "bbox": [ + 74, + 108, + 307, + 116 + ], + "type": "text", + "content": "Is the camera capturing the scene moving? Reply yes or no." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 117, + 283, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 117, + 283, + 125 + ], + "spans": [ + { + "bbox": [ + 74, + 117, + 283, + 125 + ], + "type": "text", + "content": "Is the video capturing a landscape? Reply yes or no." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 126, + 295, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 126, + 295, + 135 + ], + "spans": [ + { + "bbox": [ + 74, + 126, + 295, + 135 + ], + "type": "text", + "content": "Is the video capturing a static scene? Reply yes or no." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 136, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 136, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 74, + 136, + 294, + 144 + ], + "type": "text", + "content": "Is the scene captured from a distance? Reply yes or no." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 145, + 283, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 145, + 283, + 154 + ], + "spans": [ + { + "bbox": [ + 74, + 145, + 283, + 154 + ], + "type": "text", + "content": "Is the video captured with a drone? Reply yes or no." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 74, + 155, + 271, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 155, + 271, + 163 + ], + "spans": [ + { + "bbox": [ + 74, + 155, + 271, + 163 + ], + "type": "text", + "content": "Is the video computer-generated? Reply yes or no." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 74, + 164, + 263, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 164, + 263, + 173 + ], + "spans": [ + { + "bbox": [ + 74, + 164, + 263, + 173 + ], + "type": "text", + "content": "Is the video content abstract? Reply yes or no." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 174, + 320, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 174, + 320, + 182 + ], + "spans": [ + { + "bbox": [ + 74, + 174, + 320, + 182 + ], + "type": "text", + "content": "Is there something moving through the scene? Reply yes or no." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 183, + 327, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 183, + 327, + 192 + ], + "spans": [ + { + "bbox": [ + 74, + 183, + 327, + 192 + ], + "type": "text", + "content": "Is there someone doing something in the video? Reply yes or no." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 74, + 193, + 323, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 193, + 323, + 201 + ], + "spans": [ + { + "bbox": [ + 74, + 193, + 323, + 201 + ], + "type": "text", + "content": "Are there several things moving in the video? Reply yes or no." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 74, + 202, + 323, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 202, + 323, + 211 + ], + "spans": [ + { + "bbox": [ + 74, + 202, + 323, + 211 + ], + "type": "text", + "content": "Is there an object that is being manipulated? Reply yes or no." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 74, + 212, + 267, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 212, + 267, + 220 + ], + "spans": [ + { + "bbox": [ + 74, + 212, + 267, + 220 + ], + "type": "text", + "content": "Are there animals in the video? Reply yes or no." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 74, + 221, + 251, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 221, + 251, + 230 + ], + "spans": [ + { + "bbox": [ + 74, + 221, + 251, + 230 + ], + "type": "text", + "content": "Is the scene mostly static? Reply yes or no." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 74, + 231, + 326, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 231, + 326, + 239 + ], + "spans": [ + { + "bbox": [ + 74, + 231, + 326, + 239 + ], + "type": "text", + "content": "Are things occluding each other in this video? Reply yes or no." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 74, + 240, + 402, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 240, + 402, + 249 + ], + "spans": [ + { + "bbox": [ + 74, + 240, + 402, + 249 + ], + "type": "text", + "content": "Is there something obstructing the view apart from the watermark? Reply yes or no." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 74, + 250, + 331, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 250, + 331, + 258 + ], + "spans": [ + { + "bbox": [ + 74, + 250, + 331, + 258 + ], + "type": "text", + "content": "Is there a large number of things in the video? Reply yes or no." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 74, + 259, + 354, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 259, + 354, + 268 + ], + "spans": [ + { + "bbox": [ + 74, + 259, + 354, + 268 + ], + "type": "text", + "content": "Are there more than 5 different objects in the video? Reply yes or no." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 74, + 269, + 438, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 269, + 438, + 277 + ], + "spans": [ + { + "bbox": [ + 74, + 269, + 438, + 277 + ], + "type": "text", + "content": "Is it hard to keep track of some entities because they are moving so much? Reply yes or no." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 74, + 278, + 386, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 278, + 386, + 286 + ], + "spans": [ + { + "bbox": [ + 74, + 278, + 386, + 286 + ], + "type": "text", + "content": "Is someone looking at a phone, a tablet or a computer screen? Reply yes or no." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 74, + 288, + 470, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 288, + 470, + 296 + ], + "spans": [ + { + "bbox": [ + 74, + 288, + 470, + 296 + ], + "type": "text", + "content": "Are they looking at a phone, a tablet or a computer screen during the whole video? Reply yes or no." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 74, + 297, + 331, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 297, + 331, + 306 + ], + "spans": [ + { + "bbox": [ + 74, + 297, + 331, + 306 + ], + "type": "text", + "content": "Are there several moving persons in this video? Reply yes or no." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 74, + 307, + 331, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 307, + 331, + 316 + ], + "spans": [ + { + "bbox": [ + 74, + 307, + 331, + 316 + ], + "type": "text", + "content": "Are there several moving animals in this video? Reply yes or no." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 74, + 316, + 302, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 316, + 302, + 324 + ], + "spans": [ + { + "bbox": [ + 74, + 316, + 302, + 324 + ], + "type": "text", + "content": "Are there several objects in this video? Reply yes or no." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 74, + 326, + 362, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 326, + 362, + 334 + ], + "spans": [ + { + "bbox": [ + 74, + 326, + 362, + 334 + ], + "type": "text", + "content": "Are there several similar-looking objects in the video? Reply yes or no." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 74, + 335, + 227, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 335, + 227, + 343 + ], + "spans": [ + { + "bbox": [ + 74, + 335, + 227, + 343 + ], + "type": "text", + "content": "Do they look similar? 
Reply yes or no." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 351, + 542, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 351, + 542, + 399 + ], + "spans": [ + { + "bbox": [ + 67, + 351, + 542, + 399 + ], + "type": "text", + "content": "We use LLaVA-OneVision [78] model to extract LLM features from the videos. For each video, we prompt with 26 different questions to extract features ranging from, \"is the video a landscape video?\" to, \"are there any moving objects in the video?\" The features are then used by a random forest model to determine the video quality score." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 67, + 414, + 223, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 414, + 223, + 424 + ], + "spans": [ + { + "bbox": [ + 67, + 414, + 223, + 424 + ], + "type": "text", + "content": "A.2.3 PVD Benchmark Distribution" + } + ] + } + ], + "index": 29 + }, + { + "type": "table", + "bbox": [ + 230, + 437, + 380, + 533 + ], + "blocks": [ + { + "bbox": [ + 230, + 437, + 380, + 533 + ], + "lines": [ + { + "bbox": [ + 230, + 437, + 380, + 533 + ], + "spans": [ + { + "bbox": [ + 230, + 437, + 380, + 533 + ], + "type": "table", + "html": "
CategoryNumber of videosAvg. Caption Length
Hand Actions214354.2
Object Interactions186442.6
Food Preparation169156.8
Work Activities168947.8
Outdoor Scenes155850.7
Animals142350.9
Water Scenes133744.6
Object Handling130751.6
Close-up Shots112245.1
Nature Scenes86638.4
", + "image_path": "3b9c2fc708c6ae91f43db0079c0a53d5a6bfe209c1b5de951dd81e4a3cdb737b.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "table_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 535, + 542, + 559 + ], + "lines": [ + { + "bbox": [ + 67, + 535, + 542, + 559 + ], + "spans": [ + { + "bbox": [ + 67, + 535, + 542, + 559 + ], + "type": "text", + "content": "Table 16 PVD Benchmark Statistics. We created a dataset of 15K videos together with human-verified captions. The videos are motion-centered, covering both first-person and third-person views with a wide coverage of scenes." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 119, + 302, + 165 + ], + "blocks": [ + { + "bbox": [ + 71, + 119, + 302, + 165 + ], + "lines": [ + { + "bbox": [ + 71, + 119, + 302, + 165 + ], + "spans": [ + { + "bbox": [ + 71, + 119, + 302, + 165 + ], + "type": "image", + "image_path": "f60491c1bd688dfc6de41ba84a4f1eabcb23cc6b5712bda279c86259deaa53bc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 173, + 220, + 180 + ], + "lines": [ + { + "bbox": [ + 154, + 173, + 220, + 180 + ], + "spans": [ + { + "bbox": [ + 154, + 173, + 220, + 180 + ], + "type": "text", + "content": "Category: Hand Actions" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 182, + 296, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 182, + 296, + 199 + ], + "spans": [ + { + "bbox": [ + 78, + 
182, + 296, + 199 + ], + "type": "text", + "content": "Caption: The video captures a closeup shot of person typing on a keyboard. The camera moves from the left side of the keyboard to the right, an animation of the revolving globe and some numbers can be seen in the frame and the video ends." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 121, + 539, + 165 + ], + "blocks": [ + { + "bbox": [ + 307, + 121, + 539, + 165 + ], + "lines": [ + { + "bbox": [ + 307, + 121, + 539, + 165 + ], + "spans": [ + { + "bbox": [ + 307, + 121, + 539, + 165 + ], + "type": "image", + "image_path": "6e9ec06538492e622f82c5cc96b947e2ebc7d1e0c82c1caab229a7bc02bc9d11.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 176, + 463, + 183 + ], + "lines": [ + { + "bbox": [ + 383, + 176, + 463, + 183 + ], + "spans": [ + { + "bbox": [ + 383, + 176, + 463, + 183 + ], + "type": "text", + "content": "Category: Object Interactions" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 319, + 184, + 526, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 184, + 526, + 196 + ], + "spans": [ + { + "bbox": [ + 319, + 184, + 526, + 196 + ], + "type": "text", + "content": "Caption: The video shows a black and white spiral that is spinning. The spiral is made up of alternating black and white stripes that are evenly spaced and symmetrical." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 71, + 220, + 302, + 268 + ], + "blocks": [ + { + "bbox": [ + 71, + 220, + 302, + 268 + ], + "lines": [ + { + "bbox": [ + 71, + 220, + 302, + 268 + ], + "spans": [ + { + "bbox": [ + 71, + 220, + 302, + 268 + ], + "type": "image", + "image_path": "5588664e10ed778735c066db9c914cdbe8fd018b0c878322cb89d5734afe3da3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 272, + 225, + 278 + ], + "lines": [ + { + "bbox": [ + 149, + 272, + 225, + 278 + ], + "spans": [ + { + "bbox": [ + 149, + 272, + 225, + 278 + ], + "type": "text", + "content": "Category: Food Preparation" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 280, + 299, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 280, + 299, + 303 + ], + "spans": [ + { + "bbox": [ + 75, + 280, + 299, + 303 + ], + "type": "text", + "content": "Caption: The video shows a person cutting an green color item into small pieces. They are using a knife to slice the pickle into thin pieces, and then chopping those pieces into smaller cubes. The person is working on a wooden cutting board, and the Hands are visible from the left side of the frame with pink nail paint on their nails." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 308, + 220, + 539, + 268 + ], + "blocks": [ + { + "bbox": [ + 308, + 220, + 539, + 268 + ], + "lines": [ + { + "bbox": [ + 308, + 220, + 539, + 268 + ], + "spans": [ + { + "bbox": [ + 308, + 220, + 539, + 268 + ], + "type": "image", + "image_path": "7478d5141e36888fb5f937cef316c63db3911b313ad911182a76d9d80cc5f380.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 389, + 277, + 457, + 284 + ], + "lines": [ + { + "bbox": [ + 389, + 277, + 457, + 284 + ], + "spans": [ + { + "bbox": [ + 389, + 277, + 457, + 284 + ], + "type": "text", + "content": "Category: Work Activities" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 285, + 531, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 285, + 531, + 297 + ], + "spans": [ + { + "bbox": [ + 315, + 285, + 531, + 297 + ], + "type": "text", + "content": "Caption: The video shows a person using a shovel to clean the ashes from a fireplace. They are scooping up the ashes and removing them from the fireplace." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 71, + 319, + 302, + 371 + ], + "blocks": [ + { + "bbox": [ + 71, + 319, + 302, + 371 + ], + "lines": [ + { + "bbox": [ + 71, + 319, + 302, + 371 + ], + "spans": [ + { + "bbox": [ + 71, + 319, + 302, + 371 + ], + "type": "image", + "image_path": "a7faa8bb779978d62ee631eba3e97506f422bda9d1a13e3b20786dd962483c5d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 373, + 223, + 379 + ], + "lines": [ + { + "bbox": [ + 151, + 373, + 223, + 379 + ], + "spans": [ + { + "bbox": [ + 151, + 373, + 223, + 379 + ], + "type": "text", + "content": "Category: Outdoor Scenes" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 77, + 380, + 296, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 380, + 296, + 404 + ], + "spans": [ + { + "bbox": [ + 77, + 380, + 296, + 404 + ], + "type": "text", + "content": "Caption: The video shows a tall, pointed structure in the middle of a field. and the structure is surrounded by trees and other vegetation. The field is divided into sections, with some areas covered in green grass and others covered in white material. The video shows the structure and the field from a distance, with the camera moving around it." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 322, + 539, + 371 + ], + "blocks": [ + { + "bbox": [ + 307, + 322, + 539, + 371 + ], + "lines": [ + { + "bbox": [ + 307, + 322, + 539, + 371 + ], + "spans": [ + { + "bbox": [ + 307, + 322, + 539, + 371 + ], + "type": "image", + "image_path": "e1f6bd1a6e1428498e5dad05ef9684dc27e65ce2230edda4cda2e837c0fd68b8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 398, + 375, + 448, + 382 + ], + "lines": [ + { + "bbox": [ + 398, + 375, + 448, + 382 + ], + "spans": [ + { + "bbox": [ + 398, + 375, + 448, + 382 + ], + "type": "text", + "content": "Category: Animals" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 383, + 533, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 383, + 533, + 401 + ], + "spans": [ + { + "bbox": [ + 313, + 383, + 533, + 401 + ], + "type": "text", + "content": "Caption: The video shows a white and gray adult cat and two kittens. The adult cat is grooming the kitten closest to it with its tongue, and the kitten is looking around. A hand reaches out from the frame's upper left to pet the two kittens." 
+ } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 71, + 422, + 302, + 467 + ], + "blocks": [ + { + "bbox": [ + 71, + 422, + 302, + 467 + ], + "lines": [ + { + "bbox": [ + 71, + 422, + 302, + 467 + ], + "spans": [ + { + "bbox": [ + 71, + 422, + 302, + 467 + ], + "type": "image", + "image_path": "319104e044a229bfe0fc6e17d447be4ef0e0b642fbd22115c20140690a4d8292.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 479, + 220, + 486 + ], + "lines": [ + { + "bbox": [ + 155, + 479, + 220, + 486 + ], + "spans": [ + { + "bbox": [ + 155, + 479, + 220, + 486 + ], + "type": "text", + "content": "Category: Water Scenes" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 82, + 487, + 292, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 487, + 292, + 500 + ], + "spans": [ + { + "bbox": [ + 82, + 487, + 292, + 500 + ], + "type": "text", + "content": "Caption: The video shows a large school of fish swimming in a water body towards the right frame. The camera too pans a little to the right." 
+ } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 307, + 422, + 539, + 467 + ], + "blocks": [ + { + "bbox": [ + 307, + 422, + 539, + 467 + ], + "lines": [ + { + "bbox": [ + 307, + 422, + 539, + 467 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 539, + 467 + ], + "type": "image", + "image_path": "de294a290d605ae638c932a5b630ab3c7b2b5a3f0a9a6ff7d04285e7b65f6ea8.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 388, + 479, + 459, + 486 + ], + "lines": [ + { + "bbox": [ + 388, + 479, + 459, + 486 + ], + "spans": [ + { + "bbox": [ + 388, + 479, + 459, + 486 + ], + "type": "text", + "content": "Category: Object Handling" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 487, + 531, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 487, + 531, + 500 + ], + "spans": [ + { + "bbox": [ + 315, + 487, + 531, + 500 + ], + "type": "text", + "content": "Caption: The video shows a person putting a bowl of something into an oven. The person then closes the oven door. The background is blurry." 
+ } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 71, + 528, + 302, + 570 + ], + "blocks": [ + { + "bbox": [ + 71, + 528, + 302, + 570 + ], + "lines": [ + { + "bbox": [ + 71, + 528, + 302, + 570 + ], + "spans": [ + { + "bbox": [ + 71, + 528, + 302, + 570 + ], + "type": "image", + "image_path": "84daeda79e0b2b2f621e8931263c95a39b53a1427968537d1a6a10eec3f36ab8.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 574, + 221, + 582 + ], + "lines": [ + { + "bbox": [ + 153, + 574, + 221, + 582 + ], + "spans": [ + { + "bbox": [ + 153, + 574, + 221, + 582 + ], + "type": "text", + "content": "Category: Close-up Shots" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 75, + 583, + 299, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 583, + 299, + 606 + ], + "spans": [ + { + "bbox": [ + 75, + 583, + 299, + 606 + ], + "type": "text", + "content": "Caption: The video shows a white counter with two brown buckets and a yellow bucket. Then a person's right hand wearing a green glove enters the frame from top right side and place a yellow flower near to yellow watering can. The person then places the flower, in front of the buckets and exits the frame. In the background is a brown wall, and the camera is static throughout the clip." 
+ } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 307, + 525, + 539, + 570 + ], + "blocks": [ + { + "bbox": [ + 307, + 525, + 539, + 570 + ], + "lines": [ + { + "bbox": [ + 307, + 525, + 539, + 570 + ], + "spans": [ + { + "bbox": [ + 307, + 525, + 539, + 570 + ], + "type": "image", + "image_path": "7a97e51e5aed728ca60da5c0696ac87233b5df8d196112a189b1a35ae2cf82df.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 389, + 578, + 457, + 584 + ], + "lines": [ + { + "bbox": [ + 389, + 578, + 457, + 584 + ], + "spans": [ + { + "bbox": [ + 389, + 578, + 457, + 584 + ], + "type": "text", + "content": "Category: Nature Scenes" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 616, + 541, + 638 + ], + "lines": [ + { + "bbox": [ + 67, + 616, + 541, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 541, + 638 + ], + "type": "text", + "content": "Figure 18 More PE Video Dataset Examples. For each of the ten categories, we randomly pick one video and show its video caption. The captions were generated by our video data pipeline and then refined by human annotators." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 586, + 529, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 586, + 529, + 604 + ], + "spans": [ + { + "bbox": [ + 315, + 586, + 529, + 604 + ], + "type": "text", + "content": "Caption: The video shows a pile of branches and leaves on fire in a field. The fire is burning brightly, with flames licking at the edges of the pile. The smoke from the fire rises into the air, billowing up into the sky." 
+ } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 64, + 234, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 64, + 234, + 79 + ], + "spans": [ + { + "bbox": [ + 68, + 64, + 234, + 79 + ], + "type": "text", + "content": "B Implementation Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 91, + 138, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 91, + 138, + 103 + ], + "spans": [ + { + "bbox": [ + 68, + 91, + 138, + 103 + ], + "type": "text", + "content": "B.1 PE Core" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 112, + 543, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 112, + 543, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 543, + 125 + ], + "type": "text", + "content": "We provide additional implementation details for building " + }, + { + "bbox": [ + 67, + 112, + 543, + 125 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 112, + 543, + 125 + ], + "type": "text", + "content": ". Our implementation is based on OpenCLIP5." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 138, + 237, + 151 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 138, + 237, + 151 + ], + "spans": [ + { + "bbox": [ + 67, + 138, + 237, + 151 + ], + "type": "text", + "content": "B.1.1 Architecture and Training Setups" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "text", + "content": "Model Architecture. Following CLIP, " + }, + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "text", + "content": " comprises a Transformer-based [141] vision and a text encoder. We employ customized Transformer configurations as detailed in Tab. 17. For pooling, we an attention pooling block in the style of SigLIP [160] with 8 heads from the last-layer feature to construct image and video embeddings. Regarding positional embedding, we use 2D RoPE [127] for relative positional embeddings and 2D learnable absolute positional embeddings (abs) the same size as the model's input resolution. We interpolate positional embeddings to enable support for various resolutions beyond the default. The text context length is 72 for G-scale and 32 for B and L-scale models. Originally a bug, we find it optimal to not disable the class token when using attention pooling for smaller models. Thus, the B and L models use a class token, then the attention pooling layer probes all features at once (class token included). Finally, we use an input mean and standard deviation of " + }, + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "inline_equation", + "content": "(0.5,0.5,0.5)" + }, + { + "bbox": [ + 67, + 160, + 544, + 281 + ], + "type": "text", + "content": " for simplicity." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 116, + 289, + 493, + 358 + ], + "blocks": [ + { + "bbox": [ + 116, + 289, + 493, + 358 + ], + "lines": [ + { + "bbox": [ + 116, + 289, + 493, + 358 + ], + "spans": [ + { + "bbox": [ + 116, + 289, + 493, + 358 + ], + "type": "table", + "html": "
ScaleTowerParamsWidthDepthMLPHeadsCLIP DimPoolingPositional EmbeddingResolution & Context LenPatch SizeClass Token Register
BVision0.09B768123072121024Attn PoolRoPE+Abs22416
Text0.31B102424409616EOS TokenAbs32--
LVision0.32B1024244096161024Attn PoolRoPE+Abs33614
Text0.31B102424409616EOS TokenAbs32--
GVision1.88B1536508960161280Attn PoolRoPE+Abs44814
Text0.47B128024512020EOS TokenAbs72--
", + "image_path": "c293da833f1f864b33c52c0b9a7471a4ecaa95fb54573885bda040013500ef54.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "spans": [ + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "text", + "content": "PE Core Training. As discussed in §2.4, the training of " + }, + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "text", + "content": " involves three stages: 1) image pretraining; 2) image and video finetuning; and 3) an additional model distillation for smaller models. These three stages work together to develop a robust and effective " + }, + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 388, + 542, + 424 + ], + "type": "text", + "content": " model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 430, + 522, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 430, + 522, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 430, + 522, + 442 + ], + "type": "text", + "content": "We first provide training recipes for 1) image pretraining in Tab. 18 and 2) video finetuning in Tab. 19." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 88, + 452, + 208, + 632 + ], + "blocks": [ + { + "bbox": [ + 200, + 361, + 408, + 371 + ], + "lines": [ + { + "bbox": [ + 200, + 361, + 408, + 371 + ], + "spans": [ + { + "bbox": [ + 200, + 361, + 408, + 371 + ], + "type": "text", + "content": "Table 17 PE Model Configurations with full details." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 88, + 452, + 208, + 632 + ], + "lines": [ + { + "bbox": [ + 88, + 452, + 208, + 632 + ], + "spans": [ + { + "bbox": [ + 88, + 452, + 208, + 632 + ], + "type": "table", + "html": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate2e-3
batch size131,072
warm-up steps2K
training steps443K (B, L) / 656K (G)
data quantity5.4B
samples seen58B (B, L) / 86B (G)
max logit scale100
mask reg ratio0.4
mask reg batch8192
progressive res112-160-224 (B)
98-154-224-336 (L)
98-154-224-336-448 (G)
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "image_path": "60f27e61231de16ef8490017adbcf2ac7e92d19c25c123c43f3166e7cb26afdf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 250, + 452, + 375, + 594 + ], + "blocks": [ + { + "bbox": [ + 250, + 452, + 375, + 594 + ], + "lines": [ + { + "bbox": [ + 250, + 452, + 375, + 594 + ], + "spans": [ + { + "bbox": [ + 250, + 452, + 375, + 594 + ], + "type": "table", + "html": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size4096
warm-up steps2K
training steps5.4K
data quantity22M
samples seen22M
max logit scale100
number of frames8
data augaspect jitter ar(0.75,1.33)
rand crop s(0.08,1)
color jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "image_path": "054f467c3336982f3f223e3b29240981096ae34d8b34df2a325a937eacb719bb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 425, + 452, + 514, + 573 + ], + "blocks": [ + { + "bbox": [ + 425, + 452, + 514, + 573 + ], + "lines": [ + { + "bbox": [ + 425, + 452, + 514, + 573 + ], + "spans": [ + { + "bbox": [ + 425, + 452, + 514, + 573 + ], + "type": "table", + "html": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate1e-6
batch size16384
warm-up steps2K
training steps269K
data quantity5.4B
samples seen4.4B
max logit scale100
teacher logit scale200 (§C.3)
data augNone
", + "image_path": "ef15fc2bd9455fb292713d568e39c9a6348ca77f140ee0f3585a04410a9ad1b8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 429, + 576, + 511, + 586 + ], + "lines": [ + { + "bbox": [ + 429, + 576, + 511, + 586 + ], + "spans": [ + { + "bbox": [ + 429, + 576, + 511, + 586 + ], + "type": "text", + "content": "Table 20 Distillation." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 260, + 597, + 364, + 608 + ], + "lines": [ + { + "bbox": [ + 260, + 597, + 364, + 608 + ], + "spans": [ + { + "bbox": [ + 260, + 597, + 364, + 608 + ], + "type": "text", + "content": "Table 19 Video Finetuning." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 93, + 635, + 200, + 645 + ], + "lines": [ + { + "bbox": [ + 93, + 635, + 200, + 645 + ], + "spans": [ + { + "bbox": [ + 93, + 635, + 200, + 645 + ], + "type": "text", + "content": "Table 18 Image Pretraining." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 658, + 541, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 658, + 541, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 658, + 541, + 682 + ], + "type": "text", + "content": "After training the largest G-scale model, we train the smaller models with image pretraining, then distill with image distillation in Tab. 20, then finally apply video finetuning at the end." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 689, + 256, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 689, + 256, + 700 + ], + "spans": [ + { + "bbox": [ + 78, + 689, + 256, + 700 + ], + "type": "text", + "content": "5https://github.com/mlfoundations/open Clip" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 260, + 75 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 260, + 75 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 260, + 75 + ], + "type": "text", + "content": "B.1.2 Zero-Shot Classification and Retrieval" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 86, + 542, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 86, + 542, + 135 + ], + "spans": [ + { + "bbox": [ + 67, + 86, + 542, + 135 + ], + "type": "text", + "content": "Zero-Shot Evaluation on Images and Videos. We use CLIPBench for zero-shot classification and retrieval benchmarking. The benchmark datasets and splits are obtained from the original dataset websites or HuggingFace. We extend the CLIPBench zero-shot evaluation to include video datasets such as MSR-VTT and Kinetics, and will release our model checkpoints, evaluation code, and scripts for reproducibility." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 144, + 543, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 144, + 543, + 180 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 543, + 180 + ], + "type": "text", + "content": "Prompt Design. 
For zero-shot image-text and video-text retrieval, we rely solely on the original captions without any additional prompts. In contrast, for zero-shot classification, we utilize task-specific prompts graciously provided by the InternVL [19] authors. All additional prompts will be released." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 185, + 541, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 185, + 541, + 210 + ], + "spans": [ + { + "bbox": [ + 67, + 185, + 541, + 210 + ], + "type": "text", + "content": "For example, we employ specific prompts for zero-shot image classification on various ImageNet benchmarks (e.g., ImageNet val, ImageNet v2) and video classification on Kinetics datasets (e.g., K400, K600, K700)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 217, + 272, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 217, + 272, + 226 + ], + "spans": [ + { + "bbox": [ + 73, + 217, + 272, + 226 + ], + "type": "text", + "content": "Zero-Shot Image Classification Prompts - ImageNet" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "spans": [ + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": "a bad photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of many " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a sculpture of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the hard to see " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a low resolution photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a rendering of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". graffiti of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a bad photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a cropped photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a tattoo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the embroidered " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a photo of a hard to see " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a bright photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a clean " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a dirty " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a dark photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a drawing of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of my " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the plastic " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the cool " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a close-up photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a black and white photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a painting of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a painting of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a pixelated photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a sculpture of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a bright photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a cropped photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a plastic " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the dirty " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". aJPEG corrupted photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a blurry photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a good photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a rendering of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " in a video game. a photo of one " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a doodle of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a close-up photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the origami " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " in a video game. a sketch of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a doodle of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a origami " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a low resolution photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
the toy " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a rendition of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the clean " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a large " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a rendition of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a nice " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of a weird " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a blurry photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a cartoon " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
art of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a sketch of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a embroidered " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a pixelated photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". itap of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a JPEG corrupted photo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a good photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a plushie " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the nice " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
a photo of the small " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the weird " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the cartoon " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". art of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a drawing of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a photo of the large " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a black and white photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". the plushie " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a dark photo of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". 
itap of a " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". graffiti of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": ". a toy " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}." + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " itap of my " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}." + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " a photo of a cool " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}." + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " a photo of a small " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}." + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "text", + "content": " a tattoo of the " + }, + { + "bbox": [ + 72, + 229, + 539, + 383 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 392, + 272, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 392, + 272, + 400 + ], + "spans": [ + { + "bbox": [ + 73, + 392, + 272, + 400 + ], + "type": "text", + "content": "Zero-Shot Video Classification Prompts - Kinetics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "spans": [ + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": "a photo of " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a photo of a person " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a photo of a person using " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a photo of a person doing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a photo of a person during " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a photo of a person performing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". 
a photo of a person practicing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of a person using " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of a person doing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of a person during " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of a person performing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a video of a person practicing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". 
a example of a person " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of a person using " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of a person doing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of a person during " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of a person performing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a example of a person practicing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a demonstration of " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a demonstration of a person " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". 
a demonstration of a person using " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a demonstration of a person doing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a demonstration of a person during " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": ". a demonstration of a person performing " + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 72, + 403, + 538, + 480 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 491, + 543, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 543, + 563 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 543, + 563 + ], + "type": "text", + "content": "Evaluation Method. Several works use different input transformations for different datasets when evaluating zero-shot performance (e.g., [33, 130, 138, 160]). To be as fair as possible, we follow [130] in evaluating with two transformations—center crop and non aspect ratio preserving resize (\"squash\")—and report the max between the two for all models and all datasets we evaluate. Additionally, ObjectNet has a red border around every image to facilitate dedduplication, which we remove for evaluation. 
Finally, we follow [19] in using retrieval reweighting (DSL), applying the softmax score distribution to the similarities used for retrieval:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 208, + 571, + 542, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 571, + 542, + 583 + ], + "spans": [ + { + "bbox": [ + 208, + 571, + 542, + 583 + ], + "type": "interline_equation", + "content": "\\text {s c o r e s} = \\text {s c o r e s} * \\text {s o f t m a x} (\\text {s c o r e s}, \\dim = 0) \\tag {1}", + "image_path": "f52605fdf5beeb5ee0955806cc4d9e34acda98833d5b83e553f7a76cb644ecf0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 591, + 543, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 591, + 543, + 628 + ], + "spans": [ + { + "bbox": [ + 67, + 591, + 543, + 628 + ], + "type": "text", + "content": "This slightly improves retrieval for most models, so we do it for all models we evaluate for fairness. Notably, we were able to reproduce the reported numbers for most papers with these techniques, but for cases where we could not, we default to the reported number." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 642, + 222, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 642, + 222, + 654 + ], + "spans": [ + { + "bbox": [ + 67, + 642, + 222, + 654 + ], + "type": "text", + "content": "B.2 PE: Language Alignment" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 661, + 543, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 543, + 674 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 543, + 674 + ], + "type": "text", + "content": "We provide details of the MLLM experimental setup in " + }, + { + "bbox": [ + 67, + 661, + 543, + 674 + ], + "type": "inline_equation", + "content": "\\S 4" + }, + { + "bbox": [ + 67, + 661, + 543, + 674 + ], + "type": "text", + "content": ". 
We describe data, model, and training separately." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 682, + 543, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 682, + 543, + 707 + ], + "spans": [ + { + "bbox": [ + 67, + 682, + 543, + 707 + ], + "type": "text", + "content": "Data. Our MLLM training contains warmup data and supervised finetuning (SFT) data. Our warmup data is a 1M subset image-text pairs of our " + }, + { + "bbox": [ + 67, + 682, + 543, + 707 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 682, + 543, + 707 + ], + "type": "text", + "content": " pretraining dataset. For SFT data, we use a diverse data" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 712, + 272, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 712, + 272, + 723 + ], + "spans": [ + { + "bbox": [ + 78, + 712, + 272, + 723 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 78, + 712, + 272, + 723 + ], + "type": "text", + "content": "https://github.com/LAION-AI/CLIP_benchmark" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "text", + "content": "mix consisting of 2.6M unique samples. 
This dataset is composed of " + }, + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "inline_equation", + "content": "1.7\\mathrm{M}^7" + }, + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "text", + "content": " visual QAs samples from the Cauldron [65], 0.5M grounded QA pairs from Visual Genome [60], Flickr-Entities [103] and Densely Captioned Images [139], 0.1M image-captioning pairs from COCO [76] and 0.3M text-only samples. This comprehensive data mix allows us to thoroughly assess our model's capabilities in various MLLM tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "spans": [ + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "content": "Model. As described in § 4.1, we use a simple vision-language model architecture where a vision encoder and a pretrained decoder-only LLM are connected by a vision projector. For all tables, we use either Llama3.1-instruct 8B or QwenLM 2.5-instruct 7B as a language model, and 2-layer MLP as a vision projector. For fair comparison, we use the native resolution for image input. During inference, we evaluate the models on video tasks in zero-shot manner: We concatenate all video frames into a sequence and feed to language model, without seeing video samples during SFT. For all video tasks, we use 8 frames with the same native resolution of height and width. 
For " + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "content": ", this makes " + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "inline_equation", + "content": "448 \\times 448 \\times 8" + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "content": " input and " + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "inline_equation", + "content": "32 \\times 32 \\times 8" + }, + { + "bbox": [ + 67, + 121, + 543, + 207 + ], + "type": "text", + "content": " vision tokens." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "spans": [ + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": "Training. MLLM training consists of warmup and supervised finetuning (SFT) stages. In both stages, we freeze vision encoder and train vision projector and LLM. During warmup stage, we use a global batch size of 128 with a learning rate of " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": ". 
We gradually increase the learning rate from " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": " over 120 steps, and follow a cosine learning rate decay schedule to train a total of 8,000 steps. During SFT stage, we use a global batch size 256 with a learning rate of " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": ". Similar to the warmup, we gradually increase the learning rate from " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-7}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": " over 300 steps, and follow a cosine learning rate decay schedule to train a total of 12.5K steps. We truncate text-sequences longer than 2,048 tokens on top the visual tokens. This makes the maximum sequence length to be (num. vision tokens) + 2,048. 
With " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": " input resolution and patch size of 14, we set the maximum sequence length to " + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "inline_equation", + "content": "1,024 + 2,048 = 3,072" + }, + { + "bbox": [ + 67, + 215, + 544, + 360 + ], + "type": "text", + "content": ". To represent bounding boxes on output side for image grounding tasks, we simply use text tokens to represent each bounding box: each coordinate is normalized between 000 and 999, in “[x, y, x, y]” box format for top-left and bottom-right corners (e.g., [012, 122, 633, 782])." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 364, + 543, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 364, + 543, + 401 + ], + "spans": [ + { + "bbox": [ + 67, + 364, + 543, + 401 + ], + "type": "text", + "content": "For all baselines, we search for the best intermediate layer features to adapt to LLM. We search over " + }, + { + "bbox": [ + 67, + 364, + 543, + 401 + ], + "type": "inline_equation", + "content": "\\{-1, -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -40\\}" + }, + { + "bbox": [ + 67, + 364, + 543, + 401 + ], + "type": "text", + "content": " layers (counting from last) and report the best result in average over OCR/Chart/Document Q&A, Visual Q&A, Image Captioning and Video Understanding." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 415, + 209, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 415, + 209, + 428 + ], + "spans": [ + { + "bbox": [ + 67, + 415, + 209, + 428 + ], + "type": "text", + "content": "B.3 PE: Spatial Alignment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 436, + 167, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 167, + 448 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 167, + 448 + ], + "type": "text", + "content": "B.3.1 Training Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "spans": [ + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "text", + "content": "Loss Functions. For self-aligning to frozen " + }, + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "text", + "content": " layer 41 features (" + }, + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 457, + 536, + 470 + ], + "type": "text", + "content": "), we minimize cosine similarity:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 228, + 478, + 542, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 478, + 542, + 507 + ], + "spans": [ + { + "bbox": [ + 228, + 478, + 542, + 507 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {c o r e}} = \\frac {1}{n _ {\\mathrm {t o k}}} \\sum \\left(\\frac {\\left(S _ {5 0}\\right) \\left(T _ {4 1}\\right) ^ {T}}{\\left\\| S _ {5 0} \\right\\| \\cdot \\left\\| T _ {4 1} \\right\\|}\\right) \\tag {2}", + "image_path": "053d59bc4e3905221084bc6f9359e600a47cf14faa2f757a8cbbbcf1cd8123f8.jpg" + } + ] + } 
+ ], + "index": 7 + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "inline_equation", + "content": "S_{50}" + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "content": " denotes the last layer features of the student, " + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "inline_equation", + "content": "T_{41}" + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "content": " denotes frozen layer 41 features from " + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "inline_equation", + "content": "n_{\\mathrm{tok}}" + }, + { + "bbox": [ + 67, + 512, + 543, + 550 + ], + "type": "text", + "content": " represents the number of tokens. Note that we chose 41 fairly arbitrarily (it is layer 40 when written with indexing from 0). Judging by Fig. 8, any layer around 40 should work (and 39 may be slightly better)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 555, + 543, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 543, + 591 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 543, + 591 + ], + "type": "text", + "content": "For the encouraging locality loss " + }, + { + "bbox": [ + 67, + 555, + 543, + 591 + ], + "type": "inline_equation", + "content": "(L_{\\mathrm{loc}})" + }, + { + "bbox": [ + 67, + 555, + 543, + 591 + ], + "type": "text", + "content": ", we compute the pairwise cosine similarity between a model's own tokens and itself. 
This forms a \"spatial correspondence map\" for what tokens should be considered similar. We then compute the same for the student, and minimize the difference between the two with MSE loss:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 189, + 599, + 542, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 599, + 542, + 628 + ], + "spans": [ + { + "bbox": [ + 189, + 599, + 542, + 628 + ], + "type": "interline_equation", + "content": "L _ {\\text {l o c}} = \\frac {1}{n _ {\\text {t o k}} ^ {2}} \\sum \\left(\\frac {(S _ {5 0}) (S _ {5 0}) ^ {T}}{| | S _ {5 0} | | ^ {2}} - \\frac {(T _ {\\mathrm {S A M}}) (T _ {\\mathrm {S A M}}) ^ {T}}{| | T _ {\\mathrm {S A M}} | | ^ {2}}\\right) ^ {2} \\tag {3}", + "image_path": "5f933611d6bf3d2dff057f4942f99e38f65cccdfe21d7a7d1c12204493408f06.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{SAM}}" + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": " denotes the \"SAM Mask Logits\" constructed in §5.2. 
We also find using a temperature " + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "inline_equation", + "content": "(t)" + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": " on the SAM teacher's pairwise cosine similarity term " + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "inline_equation", + "content": "(x)" + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": " useful: " + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "inline_equation", + "content": "e^{t(x - 1)}" + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": ". The full loss is " + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{spatial}} = L_{\\mathrm{core}} + L_{\\mathrm{loc}}" + }, + { + "bbox": [ + 67, + 636, + 543, + 662 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 670, + 544, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 670, + 544, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 670, + 544, + 706 + ], + "type": "text", + "content": "Hyperparameters. In Tab. 21 we show the training hyperparameters for spatial alignment, finetuned on top of the initial " + }, + { + "bbox": [ + 67, + 670, + 544, + 706 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 670, + 544, + 706 + ], + "type": "text", + "content": " checkpoint. Then in Tab. 22 and Tab. 23, we show the settings for the two teachers and losses. 
Note that when running the teachers, we run them on the exact same image as the student (same data" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 712, + 217, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 712, + 217, + 723 + ], + "spans": [ + { + "bbox": [ + 79, + 712, + 217, + 723 + ], + "type": "text", + "content": "7We excluded multi-images samples." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 89 + ], + "type": "text", + "content": "aug and all). Additionally, because the SAM 2.1 teacher operates at a resolution of 1024, we upsample the image, generate the mask logits, and then downsample the result. Both teachers are frozen." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 83, + 97, + 201, + 263 + ], + "blocks": [ + { + "bbox": [ + 83, + 97, + 201, + 263 + ], + "lines": [ + { + "bbox": [ + 83, + 97, + 201, + 263 + ], + "spans": [ + { + "bbox": [ + 83, + 97, + 201, + 263 + ], + "type": "table", + "html": "
configvalues
optimizerLAMB
β1, β2(0.9, 0.95)
weight decay0.05
learning rate5e-4
batch size12,288
warm-up steps0
training steps24K
data quantity5.4B (PEcore PT Data)
samples seen300M
resolution448
mask ratio0.75
mask size2×2 tokens
droppath0.4
layerscale0.1
aspect jitter ar(0.75,1.33)
data augcolor jitter j(0.32,0,0.32,0)
hflip p(0.5)
", + "image_path": "5225fa73ec00cf4cdb350eeeb68dc140d3367e44438e9b1899dd91f68f3034d2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 241, + 97, + 370, + 202 + ], + "blocks": [ + { + "bbox": [ + 86, + 266, + 195, + 277 + ], + "lines": [ + { + "bbox": [ + 86, + 266, + 195, + 277 + ], + "spans": [ + { + "bbox": [ + 86, + 266, + 195, + 277 + ], + "type": "text", + "content": "Table 21 Spatial Alignment." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 241, + 97, + 370, + 202 + ], + "lines": [ + { + "bbox": [ + 241, + 97, + 370, + 202 + ], + "spans": [ + { + "bbox": [ + 241, + 97, + 370, + 202 + ], + "type": "table", + "html": "
configvalues
modelSAM 2.1-L
layermask logits
resolution1024 (interp→448)
lossEq. 3
loss weight1
temperature20
sample points32 × 32 (1024)
pred iou threshold0
stability score threshold0
mask threshold0
", + "image_path": "136f3d22e44c07b73f44f9a797639d42c616d674d5ecb888fb4f0d58cb59d6b2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 419, + 97, + 520, + 155 + ], + "blocks": [ + { + "bbox": [ + 253, + 205, + 357, + 215 + ], + "lines": [ + { + "bbox": [ + 253, + 205, + 357, + 215 + ], + "spans": [ + { + "bbox": [ + 253, + 205, + 357, + 215 + ], + "type": "text", + "content": "Table 22 SAM 2.1 Teacher." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 419, + 97, + 520, + 155 + ], + "lines": [ + { + "bbox": [ + 419, + 97, + 520, + 155 + ], + "spans": [ + { + "bbox": [ + 419, + 97, + 520, + 155 + ], + "type": "table", + "html": "
configvalues
modelPEcoreG
layer41
resolution448
lossEq. 2
loss weight1
", + "image_path": "224c02e75a06293b632476410d4685357e8faa8895d55dbfbc83851eee821798.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 419, + 158, + 520, + 168 + ], + "lines": [ + { + "bbox": [ + 419, + 158, + 520, + 168 + ], + "spans": [ + { + "bbox": [ + 419, + 158, + 520, + 168 + ], + "type": "text", + "content": "Table 23 PEcoreG Teacher." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 297, + 191, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 297, + 191, + 307 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 191, + 307 + ], + "type": "text", + "content": "B.3.2 Visualization Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 315, + 544, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 315, + 544, + 352 + ], + "spans": [ + { + "bbox": [ + 66, + 315, + 544, + 352 + ], + "type": "text", + "content": "To visualize the features in Fig. 17 and Fig. 20, our goal is to map a 1536-dimensional space down to 3 dimensions to view how the model encodes each token in relation to each other. One naive approach would be to apply PCA with 3 dimensions across all token in the image. However, we find this alone can be misleading." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 357, + 544, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 357, + 544, + 441 + ], + "spans": [ + { + "bbox": [ + 66, + 357, + 544, + 441 + ], + "type": "text", + "content": "Specifically, if the model has rich semantics, it should be the case that most of those 1536 features have some useful information in them. Some of that information could be spatially contiguous, some of it not. We want PCA to only select the spatially contiguous information, since we are trying to evaluate the spatial quality of the features. 
However, naively applying PCA will not necessarily do that, especially for models with information aggregated in \"global tokens\" (§5.1). Despite these tokens carrying important information, they are not spatially contiguous. Thus, if PCA dedicates a large portion of its 3 dimensions to global tokens, the features will look like their spatial quality is bad, despite the features containing good spatial information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "spans": [ + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "text", + "content": "So, how do we select for only the spatially contiguous information to visualize? The answer is simple: by definition, the spatially contiguous information will be... spatially contiguous. To keep the spatially contiguous information while lowering the impact of the global tokens, we can simply apply a low pass filter to the features (specifically, a gaussian blur with kernel size 3 and a " + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "text", + "content": " of 1). To retain the detail of the original features, we can average the two together. Thus, to visualize features, we use the 3D PCA of the of the following. " + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "text", + "content": " denotes the model's output features, and " + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "inline_equation", + "content": "g(x)" + }, + { + "bbox": [ + 66, + 446, + 543, + 519 + ], + "type": "text", + "content": " denotes gaussian blur." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 242, + 525, + 542, + 538 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 525, + 542, + 538 + ], + "spans": [ + { + "bbox": [ + 242, + 525, + 542, + 538 + ], + "type": "interline_equation", + "content": "0. 5 x + 0. 5 g (x, k = 3, \\sigma = 1) \\tag {4}", + "image_path": "0ec51d0218f040fa5ad324cfe27ac42cd2a57739eb907bcc101b1e408b010d84.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 544, + 543, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 544, + 543, + 582 + ], + "spans": [ + { + "bbox": [ + 66, + 544, + 543, + 582 + ], + "type": "text", + "content": "We show the impact of this in Fig. 19. Blurring the features make them appear more detailed! In reality, that information was always there, just PCA did not show it. Thus, great care must be taken when visualizing high dimensional feature spaces. If they were easy to map to 3 dimensions—you wouldn't need 1536 of them!" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 187, + 596, + 422, + 673 + ], + "blocks": [ + { + "bbox": [ + 187, + 596, + 422, + 673 + ], + "lines": [ + { + "bbox": [ + 187, + 596, + 422, + 673 + ], + "spans": [ + { + "bbox": [ + 187, + 596, + 422, + 673 + ], + "type": "image", + "image_path": "64c17d9abfbb16b4abdaf37be7f39a74f506431fd7c28364f87176745b193285.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 677, + 543, + 721 + ], + "lines": [ + { + "bbox": [ + 66, + 677, + 543, + 721 + ], + "spans": [ + { + "bbox": [ + 66, + 677, + 543, + 721 + ], + "type": "text", + "content": "Figure 19 Feature Visualization Ablation. With raw features (top row), PCA misses spatially contiguous parts of the feature space and instead focuses on global tokens (which carry information but are not spatially coherent). 
By applying a simple low pass filter (bottom row), we can reveal spatial information that PCA originally missed (see column 2: with raw features, the background looks like a mess, with the low pass filter the tiles become visible)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 112 + ], + "type": "text", + "content": "Then, to map the PCA dimensions to RBG pixel values, we map each PCA component to a corresponding channel in LCh color space, then convert those LCh colors to RGB to get the final image. Note that we use LCh instead of RGB directly for aesthetic reasons, and also because LCh is a cylindrical color space—where smooth changes to the values look like smooth changes in colors to humans—and thus is easier to discern." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 125, + 240, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 125, + 240, + 137 + ], + "spans": [ + { + "bbox": [ + 67, + 125, + 240, + 137 + ], + "type": "text", + "content": "B.3.3 Frozen Feature Dense Prediction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 144, + 542, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 144, + 542, + 168 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 542, + 168 + ], + "type": "text", + "content": "We discuss the detailed settings of the results for dense prediction with frozen features in Tab. 13. Each model is evaluated with its native resolution up to 448 or 448 (whichever is optimal)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 177, + 543, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 177, + 543, + 237 + ], + "spans": [ + { + "bbox": [ + 67, + 177, + 543, + 237 + ], + "type": "text", + "content": "Zero-Shot Tracking. We evaluate our pretrained models on label propagation task using the protocols in [52, 107] on DAVIS dataset [104]. This evaluation does not require any finetuning or probing, therefore preserves the spatial features in the model. Following Toto [107], we use the features from the last " + }, + { + "bbox": [ + 67, + 177, + 543, + 237 + ], + "type": "inline_equation", + "content": "n = 7" + }, + { + "bbox": [ + 67, + 177, + 543, + 237 + ], + "type": "text", + "content": " frames to find the nearest neighbor patch in the current frame, and then propagate the masks from the previous frames to the current frame. Note that this evaluation method does not require any training." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 246, + 543, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 246, + 543, + 306 + ], + "spans": [ + { + "bbox": [ + 67, + 246, + 543, + 306 + ], + "type": "text", + "content": "Semantic Segmentation. For semantic segmentation, we evaluate our pretrained models on ADE20K [167] semantic segmentation task. We use a linear layer and convolutional layer to map intermediate spatial features to segmentation masks following [98]. The models are evaluated and then features are resized to " + }, + { + "bbox": [ + 67, + 246, + 543, + 306 + ], + "type": "inline_equation", + "content": "518 \\times 518" + }, + { + "bbox": [ + 67, + 246, + 543, + 306 + ], + "type": "text", + "content": ". We only use features from single layer. The probing layers are finetuned with AdamW [83] with a learning rate of 0.001." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 316, + 543, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 316, + 543, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 316, + 543, + 376 + ], + "type": "text", + "content": "Depth Estimation. For depth estimation on NYUv2 [123], we follow [75, 98]. We use a DPT-head [109] on top of our frozen pretrained model and use only single layer features. We scale the size of the DPT-head for each models based on the hidden size for each architecture. Because NYU is a small dataset and the models we evaluate are large, we observe the results for most models are noisy and prone to overfitting. Thus, for fair comparison we train all models for 20 epochs and for all models take the lowest validation loss over all epochs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 384, + 543, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 543, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 543, + 458 + ], + "type": "text", + "content": "Frozen Detection. 
For the frozen feature detection results presented in §3, we evaluated using Mask R-CNN [43] as a probe. We used a resolution of 1024 for Fig. 8 and 768 for the remaining experiments in §3. Because the backbones were frozen, we did not add any global attention and instead simply tiled the input image with a window size of 32 for the 1024px experiments and 24 for the 768px experiments. All models were interpolated to patch 16. Finally, the backbones were frozen and only the FPN and R-CNN heads trained for 15 epochs on COCO with a stepwise decay LR without drop path." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 471, + 321, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 321, + 482 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 321, + 482 + ], + "type": "text", + "content": "B.3.4 End-to-End Finetuning Detection and Segmentation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 488, + 543, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 488, + 543, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 488, + 543, + 537 + ], + "type": "text", + "content": "We provide a detailed discussion of settings of end-to-end finetuning on detection and segmentation presented in Tab. 14. The hyperparameters can be found in Tab. 24. We find that the default 100-epoch protocol in ViTDet [72, 149] causes overfitting problems in COCO experiments especially for billion-level parameter vision encoders, so we tune the training epochs, learning rate, drop path and learning rate decay accordingly." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 542, + 543, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 542, + 543, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 542, + 543, + 567 + ], + "type": "text", + "content": "The LVIS experiment setting is the same as COCO except all L-size models use learning rate of 2e-4 and all g-size and G-size models use 75 epochs." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 93, + 575, + 517, + 703 + ], + "blocks": [ + { + "bbox": [ + 93, + 575, + 517, + 703 + ], + "lines": [ + { + "bbox": [ + 93, + 575, + 517, + 703 + ], + "spans": [ + { + "bbox": [ + 93, + 575, + 517, + 703 + ], + "type": "table", + "html": "
configvaluesmodellrepochsdrop pathlr decaylayersglobal window indexwindow size
optimizerAdamWOpenAI CLIP-L1e-41000.40.824(5, 11, 17, 23)14
optimizer momentum(0.9, 0.999)MetaCLIP-L1e-41000.40.824(5, 11, 17, 23)14
weight decay0.1MetaCLIP-G5e-5750.50.948(11, 23, 35, 47)14
learning rateSigLIP-so1e-41000.40.827(2, 10, 18, 26)14
learning rate scheduleStep-wise decayEVA02-L1e-41000.40.824(5, 11, 17, 23)14
learning rate decayMAE-L1e-41000.40.824(5, 11, 17, 23)14
batch size64SigLIP2-so1e-41000.40.827(2, 10, 18, 26)14
image size1024 × 1024SigLIP2-g5e-5750.50.940(9, 19, 29, 39)14
augmentationLSJ [0.1, 2.0]DINOv2-L1e-41000.40.824(5, 11, 17, 23)32
epochsDINOv2-g5e-5360.50.940(9, 19, 29, 39)32
drop pathPEcoreG5e-5750.50.950(12, 24, 36, 49)32
positional embeddingabswin [7]PEspatialG5e-5360.50.950(12, 24, 36, 49)32
patch size16
window size
global window index
", + "image_path": "3b8467cf873fe448328bb00c09bd6f8eaa56dfe7a5132e65f77156c552df6aff.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 162, + 707, + 448, + 717 + ], + "lines": [ + { + "bbox": [ + 162, + 707, + 448, + 717 + ], + "spans": [ + { + "bbox": [ + 162, + 707, + 448, + 717 + ], + "type": "text", + "content": "Table 24 Settings for End-to-End Finetuning Detection and Segmentation." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 269, + 76 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 269, + 76 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 269, + 76 + ], + "type": "text", + "content": "B.3.5 System-Level Comparison on Detection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 83, + 360, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 83, + 360, + 155 + ], + "spans": [ + { + "bbox": [ + 67, + 83, + 360, + 155 + ], + "type": "text", + "content": "We describe our implementation for system-level comparison to the state-of-the-arts on COCO object detection in Tab 15. Our implementation is based on the DETA repository8. We replace the vision encoder with our " + }, + { + "bbox": [ + 67, + 83, + 360, + 155 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}" + }, + { + "bbox": [ + 67, + 83, + 360, + 155 + ], + "type": "text", + "content": " and maintain the same hyperparameters as in the end-to-end finetuning settings, while keeping the detector unchanged. 
The training process consists of three stages:" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 399, + 66, + 518, + 118 + ], + "blocks": [ + { + "bbox": [ + 399, + 66, + 518, + 118 + ], + "lines": [ + { + "bbox": [ + 399, + 66, + 518, + 118 + ], + "spans": [ + { + "bbox": [ + 399, + 66, + 518, + 118 + ], + "type": "table", + "html": "
Test-Time AugAPbox
No TTA65.2
+ More Queries65.3
+ SoftNMS [6]65.8
+ Flip Aug65.8
+ Multiscale Aug66.0
", + "image_path": "6165b8d321ec714c5d44d432ac6923a8fa593d185ccc1fa1cd8b55f45852f7e8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 160, + 541, + 285 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 80, + 160, + 541, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 160, + 541, + 184 + ], + "spans": [ + { + "bbox": [ + 80, + 160, + 541, + 184 + ], + "type": "text", + "content": "1. Initial Training: Train on Objects365 for 12 epochs with an image resolution of " + }, + { + "bbox": [ + 80, + 160, + 541, + 184 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 80, + 160, + 541, + 184 + ], + "type": "text", + "content": ", a total batch size of 256, and a learning rate of 2e-4, which is divided by 10 at the 10th epoch." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 190, + 541, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 190, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 79, + 190, + 541, + 213 + ], + "type": "text", + "content": "2. Increasing Resolution: Continue training on Objects365 for 6 epochs with a resolution of " + }, + { + "bbox": [ + 79, + 190, + 541, + 213 + ], + "type": "inline_equation", + "content": "1536 \\times 1536" + }, + { + "bbox": [ + 79, + 190, + 541, + 213 + ], + "type": "text", + "content": ", a total batch size of 128, and a learning rate of 5e-5, which is divided by 10 at the 5th epoch." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 220, + 541, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 220, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 79, + 220, + 541, + 243 + ], + "type": "text", + "content": "3. 
Finetuning: Finetune on COCO dataset for 12 epochs with an image resolution of " + }, + { + "bbox": [ + 79, + 220, + 541, + 243 + ], + "type": "inline_equation", + "content": "1728 \\times 1728" + }, + { + "bbox": [ + 79, + 220, + 541, + 243 + ], + "type": "text", + "content": ", a total batch size of 64, and a learning rate of 5e-5, which is divided by 10 at the 8th epoch." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 80, + 250, + 541, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 250, + 541, + 285 + ], + "spans": [ + { + "bbox": [ + 80, + 250, + 541, + 285 + ], + "type": "text", + "content": "4. Further Increasing Resolution: Further finetune on COCO dataset for 3 epochs with a resolution of " + }, + { + "bbox": [ + 80, + 250, + 541, + 285 + ], + "type": "inline_equation", + "content": "1824 \\times 1824" + }, + { + "bbox": [ + 80, + 250, + 541, + 285 + ], + "type": "text", + "content": ", a total batch size of 64. To save GPU memory, we use SGD optimizer instead of Adam, with a learning rate of 5e-3, which is divided by 10 at the 2th epoch." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 292, + 541, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 292, + 541, + 315 + ], + "spans": [ + { + "bbox": [ + 67, + 292, + 541, + 315 + ], + "type": "text", + "content": "We apply a series of test-time augmentation techniques to further improve the performance, detailed in Tab. 25." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 335, + 203, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 335, + 203, + 347 + ], + "spans": [ + { + "bbox": [ + 67, + 335, + 203, + 347 + ], + "type": "text", + "content": "C Additional Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 361, + 261, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 261, + 375 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 261, + 375 + ], + "type": "text", + "content": "C.1 PEcore: Robust Image Pretraining" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 380, + 542, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 542, + 441 + ], + "type": "text", + "content": "In Tab. 26, we present the raw data for the robustness metrics in Fig. 2. Across the board, each change improved almost all metrics (with the exception of progressive resolution slightly hurting the average and mask regularization slightly hurting ImageNet Adversarial). The fact that there were no tradeoffs to these changes, indicate that their improvements to the features are general. This could be why most of these changes improved performance for downstream tasks as well." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 447, + 541, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 447, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 447, + 541, + 495 + ], + "type": "text", + "content": "Note that in §2.1, we only discuss changes that we know to work. There are several changes that we have tried that do not work (i.e., do not improve performance or lower performance). 
For instance: average pooling instead of using a class token, increasing the text tower size, using hue or contrast jitter, and maintaining the same resolution throughout training but dropping tokens instead of progressive resolution (FLIP-style)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 500, + 541, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 500, + 541, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 541, + 537 + ], + "type": "text", + "content": "We also find increasing batch size and increasing training iterations for an L scale model to have equivalent effects. This is in contrast to the batch size scaling observed by [160], but it is possible that this difference is down to a hyperparameter issue." + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 190, + 546, + 424, + 670 + ], + "blocks": [ + { + "bbox": [ + 373, + 121, + 542, + 143 + ], + "lines": [ + { + "bbox": [ + 373, + 121, + 542, + 143 + ], + "spans": [ + { + "bbox": [ + 373, + 121, + 542, + 143 + ], + "type": "text", + "content": "Table 25 Test-Time Aug for system-level comparison on COCO in Tab. 15." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 190, + 546, + 424, + 670 + ], + "lines": [ + { + "bbox": [ + 190, + 546, + 424, + 670 + ], + "spans": [ + { + "bbox": [ + 190, + 546, + 424, + 670 + ], + "type": "table", + "html": "
StepZero-Shot Classification
Avg Class.ImageNet w1/2[26]ImageNet v2[12]ObjectNet IN Classes [4]ImageNet Adversarial [47]ImageNet Reminims [46]ImageNet Sketch [143]
1Baseline75.378.971.973.768.391.167.8
2Progressive Resolution75.178.971.872.469.990.567.0
3High Batch Size76.279.572.874.171.891.068.1
4LAMB and High LR76.979.973.374.373.591.568.6
5High Resolution (336)78.380.473.875.679.292.068.8
62D RoPE79.280.774.177.480.992.769.4
7Attention Pooling80.181.074.878.482.993.469.9
8Data Augmentation80.881.175.280.883.193.571.2
9Mask Regularization80.981.375.380.982.893.871.2
", + "image_path": "ddb05a6977bad6a63462785d89e782e38ab42d14be63d629f21a1828f6517a27.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 672, + 541, + 694 + ], + "lines": [ + { + "bbox": [ + 67, + 672, + 541, + 694 + ], + "spans": [ + { + "bbox": [ + 67, + 672, + 541, + 694 + ], + "type": "text", + "content": "Table 26 Robust Image Pretraining Full Results. Raw results for the robustness metrics metrics in Fig. 2. Almost every change improves every metric, but some metrics are improved more than others (e.g., ObjectNet and ImageNet-A)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 708, + 227, + 719 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 708, + 227, + 719 + ], + "spans": [ + { + "bbox": [ + 79, + 708, + 227, + 719 + ], + "type": "text", + "content": "8https://github.com/jozhang97/DETA" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 63, + 229, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 63, + 229, + 77 + ], + "spans": [ + { + "bbox": [ + 67, + 63, + 229, + 77 + ], + "type": "text", + "content": "C.2 " + }, + { + "bbox": [ + 67, + 63, + 229, + 77 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 63, + 229, + 77 + ], + "type": "text", + "content": " : Video Data Scaling" + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 160, + 87, + 449, + 190 + ], + "blocks": [ + { + "bbox": [ + 160, + 87, + 
449, + 190 + ], + "lines": [ + { + "bbox": [ + 160, + 87, + 449, + 190 + ], + "spans": [ + { + "bbox": [ + 160, + 87, + 449, + 190 + ], + "type": "table", + "html": "
Video Data SizeAverage ImageImage Zero-ShotVideo Zero-Shot
ImageNet w1 [20]ImageNet v2 [112]ObjectNet In Classes [4]ImageNet adversarial [47]MS-COCO ts→img [76]MS-COCO img→ts [76]MS-COCO ts→img [76]Average VideoKeritics 400 [55]Keritics 600 [55]Keritics 700 [55]UCF 101 [126]HMDB 51 [62]MSR-VTT ts→vid [153]MSR-VTT vid→ts [153]
0M77.083.978.686.690.352.170.357.070.369.461.678.547.440.531.4
3M77.784.178.886.690.953.374.261.672.472.264.288.553.842.837.6
6M78.084.279.086.791.154.072.763.673.573.466.088.954.644.943.6
8M78.484.279.287.091.654.973.664.874.574.567.789.555.346.945.5
11M78.684.279.287.291.855.473.865.275.175.067.689.755.647.745.8
14M78.884.279.287.591.955.774.365.575.475.367.989.955.847.846.3
17M78.984.279.287.792.055.874.365.875.775.568.290.256.048.346.7
", + "image_path": "389c8fd6f1342ba0acefdf83153292853134b64b77cc3dafeca0867b5135efc2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 226, + 542, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 542, + 310 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 542, + 310 + ], + "type": "text", + "content": "The detailed video data scaling results are presented in Tab. 27. Our experiments demonstrate that increasing the number of synthetic video data generated by the proposed video data engine enhances the performance of classification and retrieval on both image and video benchmarks. On image benchmarks, while improvements on ImageNet val and v2 plateaued earlier compared to ObjectNet and ImageNet Adversarial, MS-COCO retrieval performance continued to show gains. On video benchmarks, scaling synthetic video data consistently yields better performance for both classification and retrieval tasks. We expect that further scaling up the video data with our video data engine will continue to drive performance improvements." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 324, + 212, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 324, + 212, + 338 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 212, + 338 + ], + "type": "text", + "content": "C.3 " + }, + { + "bbox": [ + 67, + 324, + 212, + 338 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 324, + 212, + 338 + ], + "type": "text", + "content": " : Smaller Models" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 173, + 350, + 441, + 441 + ], + "blocks": [ + { + "bbox": [ + 67, + 193, + 541, + 215 + ], + "lines": [ + { + "bbox": [ + 67, + 193, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 67, + 193, + 541, + 215 + ], + "type": "text", + "content": "Table 27 Scaling Video Data. 
Increasing the number of synthetic video data generated by our proposed video data engine consistently enhances the performance of image and video classification and retrieval tasks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 173, + 350, + 441, + 441 + ], + "lines": [ + { + "bbox": [ + 173, + 350, + 441, + 441 + ], + "spans": [ + { + "bbox": [ + 173, + 350, + 441, + 441 + ], + "type": "table", + "html": "
ModelTeacher's TempModel ScaleZero-Shot Classification
Avg Class.ImageNet val [26]ImageNet v2 [112]ObjectNet JV Classes [4]ImageNet Adversarial [47]ImageNet Renditions [46]ImageNet Sketch [143]
vanilla pretrained model-B66.274.267.462.550.283.059.8
distillation×2B65.271.865.561.450.283.658.6
×1B68.074.968.164.754.185.361.1
×0.7B68.275.168.265.354.485.161.3
×0.5B68.375.268.265.354.285.261.4
", + "image_path": "3cde832c80ead650dd1257b3e36e558af544c1df53f380fb5c7963a9230eccf0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 444, + 542, + 477 + ], + "lines": [ + { + "bbox": [ + 67, + 444, + 542, + 477 + ], + "spans": [ + { + "bbox": [ + 67, + 444, + 542, + 477 + ], + "type": "text", + "content": "Table 28 Ablation Study on Teacher's Distribution Temperature. We evaluate the effect of varying temperatures on the teacher's distribution, using a pretrained vanilla CLIP model (ViT-B/14, resolution 224) as a baseline (details in §2.1). The models are finetuned via distillation with a short schedule of 50K steps." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 491, + 542, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 542, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 542, + 552 + ], + "type": "text", + "content": "Ablation: Distillation Temperature. To optimize the performance of smaller models (B and L-scales in Tab. 4), we utilize a distillation finetuning approach with " + }, + { + "bbox": [ + 67, + 491, + 542, + 552 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 491, + 542, + 552 + ], + "type": "text", + "content": " as the teacher model. During this process, both student and teacher models encode image and text inputs to compute image-to-text and text-to-image similarity distributions, similar to CLIP training [106]. The student's distributions are then optimized to match those of the teacher by minimizing KL-divergence loss on both image-to-text and text-to-image similarity distributions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 556, + 542, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 542, + 605 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 542, + 605 + ], + "type": "text", + "content": "We find that using a fixed and smaller temperature (i.e., higher logit scale), which controls the range of logits in the softmax, significantly enhances the effectiveness of distillation. This results in a sharper distribution for the teacher's distributions. In contrast, the student's temperature remains learnable, consistent with our pretraining procedure and CLIP training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 611, + 543, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 543, + 671 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 543, + 671 + ], + "type": "text", + "content": "In Tab. 28, we present an ablation study examining the impact of temperature on the teacher's distribution. For this analysis, we utilize a pretrained vanilla CLIP model (ViT-B/14, resolution 224), which serves as a baseline for comparison (see §2.1 for details). The models are finetuned using distillation with a concise schedule of 50K steps. Notably, our results show that employing a smaller temperature for the teacher's distributions yields improved performance on zero-shot ImageNet benchmarks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 680, + 543, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 543, + 717 + ], + "type": "text", + "content": "Building strong smaller models. In Tab. 29, we demonstrate our step-by-step training strategy for building strong smaller models at the L scale, as discussed in §2.4. Specifically, we outline our approach to image pretraining, image distillation, and video finetuning, and distillation. 
Leveraging the robust foundation established by our" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 91, + 62, + 518, + 144 + ], + "blocks": [ + { + "bbox": [ + 91, + 62, + 518, + 144 + ], + "lines": [ + { + "bbox": [ + 91, + 62, + 518, + 144 + ], + "spans": [ + { + "bbox": [ + 91, + 62, + 518, + 144 + ], + "type": "table", + "html": "
ModelStageImage Zero-ShotVideo Zero-Shot
Average ImageImageNetv1 [26]ImageNetv2 [112]ObjectNetIN Classes [4]ImageNetAdversarial [47]MS-COCOv1→v1img [76]MS-COCOimg→v1img [76]Average VideoKinetics400 [55]Kinetics600 [53]Kinetics700 [55]UCF101 [126]HMDB 51 [62]MS-RVTTv1→v1v1d [153]MS-RVTTv1→v1v1d [153]
SigLIP2-L/16 [138]-76.083.177.484.484.355.371.456.265.362.556.886.749.341.531.4
PEcoreLimage pretraining75.182.976.881.885.653.070.459.068.067.758.585.557.742.033.4
PEcoreL+image distillation from PEcoreG77.683.678.184.488.956.074.764.573.072.664.886.558.047.948.4
PEcoreL+video finetuning78.083.577.984.789.057.175.965.373.472.765.387.158.550.350.1
", + "image_path": "abc1114a11768f47e364fe16d4aef24261b196f0d49fc64674d66412edf9825b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 201, + 542, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 542, + 237 + ], + "spans": [ + { + "bbox": [ + 67, + 201, + 542, + 237 + ], + "type": "text", + "content": "pretraining techniques (§2.1), we show that distilling from " + }, + { + "bbox": [ + 67, + 201, + 542, + 237 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 201, + 542, + 237 + ], + "type": "text", + "content": ", our strongest unified perception encoder, yields improvements on both image and video benchmarks. Furthermore, a short-scheduled video finetuning provides an additional boost in performance on both benchmarks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 251, + 227, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 227, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 227, + 266 + ], + "type": "text", + "content": "C.4 " + }, + { + "bbox": [ + 67, + 251, + 227, + 266 + ], + "type": "inline_equation", + "content": "\\mathsf{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 251, + 227, + 266 + ], + "type": "text", + "content": ": Additional Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": "Analogous to Tab. 10, in Tab. 
30, we compare " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " with dynamic resolution setting [77, 82]. More specifically, we use up to 4 tiles, following after a thumbnail, which is a whole image resized into " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": ". With the maximum number of tiles of 4, the model can cover " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\{1 \\times 1, 1 \\times 2, 1 \\times 3, 1 \\times 4, 2 \\times 1, 2 \\times 2, 3 \\times 1, 4 \\times 1\\}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " tile ratios. Similar to the Tab. 10, 11, 12 in the main paper, we show that " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " largely outperforms the baseline vision encoders by large margins across all categories of MLLM tasks. 
Note that " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " has been alignment-tuned with native resolution input, as opposed to e.g., InternViT 2.5, which has been midtrained with dynamic tiling, which shows " + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 270, + 543, + 356 + ], + "type": "text", + "content": " 's strong generality for different input formats." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "text", + "content": "Next, in Tab. 31, 32, 33, we show the breakdowns of RefCOCO/+/g [56] with Llama 3.1-instruct 8B as language model, Qwen2.5 LM 7B as language model, and with Llama 3.1-instruct 8B and dynamic tiling " + }, + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "inline_equation", + "content": "(4 + 1)" + }, + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "text", + "content": ", respectively. In our SFT data, we have VisualGenome [60], DCI [139], and Flickr30K [103] as grounding datasets, and RefCOCO/+/g are unseen. We therefore report zero-shot performance of the MLLMs to evaluate spatial understanding capability of the vision encoders. Overall, " + }, + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 360, + 543, + 445 + ], + "type": "text", + "content": " L or G show the best performance across all RefCOCO splits, except with Qwen2.5 LM. 
This is because (1) InternViT 2.5 6B is midtrained with Qwen2 LM, and (2) during pre/mid-training the training data of RefCOCO/+/g are seen." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 68, + 454, + 545, + 644 + ], + "blocks": [ + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "lines": [ + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "spans": [ + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "type": "text", + "content": "Table 29 Building Strong Smaller Models. This table illustrates the step-by-step process of developing the " + }, + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}336\\mathrm{px}" + }, + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "type": "text", + "content": " model, as outlined in §2.4. Starting with the pretrained " + }, + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{L}" + }, + { + "bbox": [ + 67, + 146, + 542, + 179 + ], + "type": "text", + "content": ", both image distillation, along with video finetuning, enhance performance across image and video benchmarks, resulting in a unified L-scale model." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 454, + 545, + 644 + ], + "lines": [ + { + "bbox": [ + 68, + 454, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 68, + 454, + 545, + 644 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolution Patch SizeOCR / Chart / Doc. Q&AVisual Q&ACaptioningVideo
Avg. OCR QAAvg. VQAAvg. Cap.Flicker CIDEr [157]COCO CIDEr [76]No Cap CIDEr [1]Avg. Ground. RetCOCOg+ [58]Avg. VideoVdeoMME Acc [38]STAR Acc [148]TCIF-QA Acc [53]EgoSchema Acc [89]MVBench Acc [68]PerceptionTest Acc [105]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1461.871.162.540.273.374.665.364.988.579.8113.490.4133.5116.267.148.044.847.162.739.046.048.3
MetaCLIP-G [152]1.8B224/1460.368.161.339.172.874.965.465.988.280.1114.291.8134.4116.566.049.046.546.562.545.044.748.9
PElang G†1.7B*224/1470.279.879.147.574.676.070.664.388.380.6116.392.0136.4120.569.556.649.055.969.961.250.053.6
576 Tokens per Tile
CLIP [106]0.3B336/1469.676.878.250.372.976.371.864.988.080.4114.090.9134.4116.668.550.846.652.265.044.646.349.9
AIMv2-L [37]0.3B336/1466.774.174.945.272.477.473.565.689.081.7116.492.5137.1119.566.654.143.454.370.656.047.352.7
SigLIP2-so [138]0.4B384/1655.561.454.933.372.376.570.166.088.681.2118.095.8138.3119.866.554.344.952.866.858.649.653.3
SigLIP2-g-opt [138]1.1B384/1656.263.155.334.072.477.070.366.789.681.6117.794.9137.8120.366.553.946.253.966.653.848.554.7
PElang G†1.7B*336/1477.582.188.561.877.479.780.266.489.882.5120.397.4140.2123.271.959.849.462.774.164.053.155.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1656.966.056.534.370.976.469.966.288.481.2117.894.7137.8120.967.846.247.044.966.739.234.545.1
PEcoreL0.3B448/1467.172.478.346.471.276.474.063.788.879.0113.991.5134.5115.762.951.447.051.262.749.647.850.1
PElang L0.3B448/1478.382.889.365.275.978.578.864.489.681.3117.894.7138.1120.771.656.547.057.268.059.852.354.7
AIMv2 3B [37]2.7B448/1467.573.078.246.572.278.879.266.288.381.7119.095.8139.7121.565.154.049.655.467.349.649.952.5
InternViT2.5 6B [18]5.5B448/1467.474.674.347.672.975.971.364.887.779.7110.485.3132.5113.556.852.046.049.665.050.649.651.3
PEcoreG1.9B448/1468.073.481.247.669.776.474.362.589.179.6113.091.6134.5112.967.653.246.054.367.051.248.752.0
PElang G1.7B*448/1478.681.889.867.875.080.382.366.789.682.8119.695.2140.3123.471.859.049.661.873.960.052.656.3
", + "image_path": "b2f874bec500ec2a5eb8600f35c03af98da22cff32721a4b4f68cdca969810fc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "lines": [ + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "spans": [ + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "type": "text", + "content": "Table 30 4+1 Tile Llama 8B MLLM Results. Llama 3.1-instruct 8B [82] is used as a language model. " + }, + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "type": "inline_equation", + "content": "{}^{*}\\mathrm{PE}_{\\mathrm{lang}}" + }, + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "type": "text", + "content": " has 1.7B parameters since we discard the last 3 layers during language alignment. All MLLMs are trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of " + }, + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "type": "inline_equation", + "content": "448\\times 448" + }, + { + "bbox": [ + 67, + 647, + 543, + 702 + ], + "type": "text", + "content": " (or the corresponding resolution for each encoder). The image tiles follow after a thumbnail input, similar to prior work [77]. Evaluation on an model that was interpolated without additional training (i.e., zero-shot resolution)." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 751 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 181, + 63, + 433, + 277 + ], + "blocks": [ + { + "bbox": [ + 181, + 63, + 433, + 277 + ], + "lines": [ + { + "bbox": [ + 181, + 63, + 433, + 277 + ], + "spans": [ + { + "bbox": [ + 181, + 63, + 433, + 277 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolution Path SizeAvg. Ground.
RefCOCO val/ [56]RefCOCO testA [56]RefCOCO val/ [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]RefCOCO+ val/ [56]RefCOCO+ testA [56]
256 Tokens per Image
MetaCLIP-L [152]0.3B224/1460.663.656.767.554.158.948.867.267.8
MetaCLIP-G [152]1.8B224/1460.562.056.567.853.558.749.268.268.3
PEiang G†1.7B*224/1465.767.764.470.958.362.056.673.274.4
576 Tokens per Image
CLIP [106]0.3B336/1465.066.761.471.657.662.554.573.272.8
AIMv2-L [37]0.3B336/1463.365.461.669.655.060.052.071.171.5
AIMv2-L Dist. [37]0.3B336/1462.664.861.069.454.459.051.370.870.0
SigLIP2-so [138]0.4B384/1667.468.866.571.060.361.858.576.276.0
SigLIP2-g-opt [138]1.1B384/1666.567.966.170.158.861.757.175.575.0
PEiang G†1.7B*336/1468.969.867.573.261.564.060.877.377.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1466.969.366.772.658.363.157.274.274.0
SigLIP2-so [138]0.4B512/1669.671.469.274.461.364.860.377.977.2
PEcore L0.3B448/1459.761.755.366.953.158.848.068.567.5
PEiang L0.3B448/1470.571.870.273.063.766.162.778.878.9
DINOv2 [98]1.1B448/1464.967.262.570.557.061.054.573.173.1
AIMv2 3B [37]2.7B448/1436.137.634.140.732.736.232.036.938.6
InternViT2.5 6B [18]5.5B448/1468.070.267.672.260.664.058.775.375.2
PEcore G1.9B448/1466.668.364.472.358.762.756.075.175.0
PEiang G1.7B*448/1471.371.969.975.164.267.363.079.479.2
", + "image_path": "c4c7c849b0dc9295c39690b836481b60a614a3ea89eddad3e9fbbbcb72ed2aaa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 181, + 300, + 433, + 458 + ], + "blocks": [ + { + "bbox": [ + 67, + 278, + 541, + 291 + ], + "lines": [ + { + "bbox": [ + 67, + 278, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 67, + 278, + 541, + 291 + ], + "type": "text", + "content": "Table 31 Llama MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used for zeroshot RefCOCO/+/g grounding." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 181, + 300, + 433, + 458 + ], + "lines": [ + { + "bbox": [ + 181, + 300, + 433, + 458 + ], + "spans": [ + { + "bbox": [ + 181, + 300, + 433, + 458 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolutionPatch SizeAvg. Ground.
RefCOCO var[56]RefCOCO texA[56]RefCOCO var[56]RefCOCO+ texA[56]RefCOCO+ var[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]RefCOCO+ texA[56]
576 Tokens per Image
SigLIP2-so [138]0.4B384/1670.073.673.074.360.962.759.978.477.2
SigLIP2-g-opt [138]1.1B384/1669.973.372.473.660.562.360.778.478.2
PEiangG†1.7B*336/1470.173.472.075.362.064.261.278.477.7
1024 Tokens per Image
InternViT2.5 L [18]0.3B448/1468.172.469.174.159.362.456.675.275.5
SigLIP2-so [138]0.4B512/1670.574.173.774.461.762.961.078.677.9
PEcoreL0.3B448/1466.570.467.871.557.761.156.275.875.3
PEiangL0.3B448/1470.474.472.674.662.264.062.079.078.7
DINOv2 [98]1.1B448/1469.373.471.173.960.063.959.076.476.7
AIMv2 3B [37]2.7B448/1467.671.467.772.359.261.256.376.476.4
InternViT2.5 6B‡ [18]5.5B448/1472.877.776.577.163.666.062.280.079.5
PEcoreG1.9B448/1470.574.071.875.861.564.860.178.577.3
PEiangG1.7B*448/1472.175.472.976.364.265.962.979.779.7
", + "image_path": "d146e5ba36590a72990779f2d2fff0d2f01b0733d571b364a0116dbed224b453.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 181, + 492, + 433, + 683 + ], + "blocks": [ + { + "bbox": [ + 67, + 460, + 541, + 483 + ], + "lines": [ + { + "bbox": [ + 67, + 460, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 460, + 541, + 483 + ], + "type": "text", + "content": "Table 32 Qwen MLLM-Based Zereshot RefCOCO. QwenLM 2.5 7B [155] is used as a language model. All MLLMs report zereshot results on RefCOCO/+/g datasets. " + }, + { + "bbox": [ + 67, + 460, + 541, + 483 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 67, + 460, + 541, + 483 + ], + "type": "text", + "content": "Trained with RefCOCO/+/g beforehand." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 181, + 492, + 433, + 683 + ], + "lines": [ + { + "bbox": [ + 181, + 492, + 433, + 683 + ], + "spans": [ + { + "bbox": [ + 181, + 492, + 433, + 683 + ], + "type": "table", + "html": "
ModelEncoder ParamsResolutionAvg. Ground.Grounding
RefCOCORefCOCORefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+RefCOCO+
val [56]val [56]val [56]val [56]val [56]val [56]val [56]val [56]
256 Tokens per Tile
MetaCLIP-L [152]0.3B224/1467.169.365.073.260.564.956.574.373.4
MetaCLIP-G [152]1.8B224/1466.067.963.271.959.262.955.873.873.1
PElang G†1.7B*224/1470.371.669.673.763.366.262.678.678.2
576 Tokens per Tile
CLIP [106]0.3B336/1468.570.766.674.161.165.958.176.075.1
AIMv2-L [37]0.3B336/1466.668.465.571.459.363.456.574.274.2
SigLIP2-so [138]0.4B384/1666.567.966.170.158.861.757.175.575.0
SigLIP2-g-opt [138]1.1B384/1666.568.265.670.159.062.358.074.874.0
PElang G†1.7B*336/1471.973.671.574.964.867.363.980.480.6
1024 Tokens per Tile
SigLIP2-so [138]0.4B512/1667.869.267.871.259.962.559.076.976.0
PEcoreL0.3B448/1462.965.359.969.256.662.252.070.170.0
PElang L0.3B448/1471.673.070.874.365.267.262.979.779.7
AIMv2 3B [37]2.7B448/1465.166.962.971.158.162.455.671.872.2
InternViT2.5 B‡ [18]5.5B448/1456.861.056.465.851.057.046.158.058.9
PEcoreG1.9B448/1467.669.265.872.459.964.158.375.175.6
PElang G1.7B*448/1471.872.670.774.664.866.664.680.480.3
", + "image_path": "c316ddc2973a2c132e703a17948017da60b2c81884dbd22cf8abbd5cd3d8dd51.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 685, + 541, + 719 + ], + "lines": [ + { + "bbox": [ + 67, + 685, + 541, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 685, + 541, + 719 + ], + "type": "text", + "content": "Table 33 4+1 Tile Llama 8B MLLM-Based Zeroshot RefCOCO. Llama 3.1-instruct 8B [82] is used as a language model. All trained with dynamic tiling for different image sizes and aspect ratio. We use up to 4 image tiles of the encoder's native resolution, with a thumbnail image in front, similar to prior work [77]. " + }, + { + "bbox": [ + 67, + 685, + 541, + 719 + ], + "type": "inline_equation", + "content": "{}^{ \\ddagger }" + }, + { + "bbox": [ + 67, + 685, + 541, + 719 + ], + "type": "text", + "content": " Trained with RefCOCO/+/g beforehand." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 63, + 292, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 63, + 292, + 77 + ], + "spans": [ + { + "bbox": [ + 68, + 63, + 292, + 77 + ], + "type": "text", + "content": "C.5 PEspatial: Additional Qualitative Results" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 87, + 303, + 601 + ], + "blocks": [ + { + "bbox": [ + 69, + 87, + 303, + 601 + ], + "lines": [ + { + "bbox": [ + 69, + 87, + 303, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 87, + 303, + 601 + ], + "type": "image", + "image_path": 
"a2b482a782d2db69b553dc95ebc085fdd0e0dfdd61c5f58feb5493e4e9b8bf2f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "lines": [ + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": "Figure 20 More Visualizations of the feature space following Fig. 17. After the image itself, column 1 is " + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": " last layer features, column 2 is " + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": " aligned to its own layer 41, column 3 is " + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": " aligned to SAM 2.1-L [111] mask logits, and column 4 is " + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{core}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": " aligned to both, denoted " + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}_{\\mathrm{spatial}}\\mathrm{G}" + }, + { + "bbox": [ + 67, + 601, + 543, + 635 + ], + "type": "text", + "content": ". See §B.3.2 for visualization method." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 308, + 88, + 541, + 599 + ], + "blocks": [ + { + "bbox": [ + 308, + 88, + 541, + 599 + ], + "lines": [ + { + "bbox": [ + 308, + 88, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 308, + 88, + 541, + 599 + ], + "type": "image", + "image_path": "775649cb979ad831b819e01ac5e03a0dcd2653c5882b8a59349bf10a1ceb5b89.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 74, + 90, + 543, + 706 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 78, + 90, + 542, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 90, + 542, + 112 + ], + "spans": [ + { + "bbox": [ + 78, + 90, + 542, + 112 + ], + "type": "text", + "content": "[1] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. Nocaps: Novel object captioning at scale. In ICCV, 2019. 
14, 15, 16, 32" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 118, + 543, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 118, + 543, + 205 + ], + "spans": [ + { + "bbox": [ + 78, + 118, + 543, + 205 + ], + "type": "text", + "content": "[2] Pravesh Agrawal, Szymon Antoniak, Emma Bou Hanna, Baptiste Bout, Devendra Chaplot, Jessica Chudnovsky, Diogo Costa, Baudouin De Monicault, Saurabh Garg, Theophile Gervet, Soham Ghosh, Amélie Héliou, Paul Jacob, Albert Q. Jiang, Kartik Khandelwal, Timothee Lacroix, Guillaume Lample, Diego Las Casas, Thibaut Lavril, Teven Le Scao, Andy Lo, William Marshall, Louis Martin, Arthur Mensch, Pavankumar Muddireddy, Valera Nemychnikova, Marie Pellat, Patrick Von Platen, Nikhil Raghuraman, Baptiste Rozière, Alexandre Sablayrolles, Lucile Saulnier, Romain Sauvestre, Wendy Shang, Roman Soletskyi, Lawrence Stewart, Pierre Stock, Joachim Studnia, Sandeep Subramanian, Sagar Vaze, Thomas Wang, and Sophia Yang. Pixtral 12b. arXiv:2410.07073, 2024. 20" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 211, + 543, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 211, + 543, + 245 + ], + "spans": [ + { + "bbox": [ + 78, + 211, + 543, + 245 + ], + "type": "text", + "content": "[3] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv:2308.12966, 2023. 20" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 250, + 543, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 250, + 543, + 283 + ], + "spans": [ + { + "bbox": [ + 78, + 250, + 543, + 283 + ], + "type": "text", + "content": "[4] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. 
ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. In NeurIPS, 2019. 3, 4, 6, 8, 9, 10, 30, 31, 32" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 289, + 543, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 289, + 543, + 366 + ], + "spans": [ + { + "bbox": [ + 78, + 289, + 543, + 366 + ], + "type": "text", + "content": "[5] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, Thomas Unterthiner, Daniel Keysers, Skanda Koppula, Fangyu Liu, Adam Grycner, Alexey A. Gritsenko, Neil Houlsby, Manoj Kumar, Keran Rong, Julian Eisenschlos, Rishabh Kabra, Matthias Bauer, Matko Bosnjak, Xi Chen, Matthias Minderer, Paul Voigtlaender, Ioana Bica, Ivana Balazevic, Joan Puigcerver, Pinelopi Papalampidi, Olivier J. Henaff, Xi Xiong, Radu Soricut, Jeremiah Harmsen, and Xiaohua Zhai. PaliGemma: A versatile 3b VLM for transfer. arXiv:2407.07726, 2024. 20" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 372, + 541, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 372, + 541, + 394 + ], + "spans": [ + { + "bbox": [ + 78, + 372, + 541, + 394 + ], + "type": "text", + "content": "[6] Navaneeth Bodla, Bharat Singh, Rama Chellappa, and Larry S Davis. Soft-NMS-Improving object detection with one line of code. In ICCV, 2017. 30" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 399, + 541, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 399, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 78, + 399, + 541, + 422 + ], + "type": "text", + "content": "[7] Daniel Bolya, Chaitanya Ryali, Judy Hoffman, and Christoph Feichtenhofer. Window attention is bugged: how not to interpolate position embeddings. In *ICLR*, 2023. 
11, 29" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 427, + 542, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 427, + 542, + 460 + ], + "spans": [ + { + "bbox": [ + 78, + 427, + 542, + 460 + ], + "type": "text", + "content": "[8] Florian Bordes, Randall Balestriero, Quentin Garrido, Adrien Bardes, and Pascal Vincent. Guillotine regularization: Why removing layers is needed to improve generalization in self-supervised learning. arXiv:2206.13378, 2022. 20" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 466, + 541, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 466, + 541, + 488 + ], + "spans": [ + { + "bbox": [ + 78, + 466, + 541, + 488 + ], + "type": "text", + "content": "[9] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - Mining discriminative components with random forests. In ECCV, 2014. 9" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 494, + 542, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 494, + 542, + 516 + ], + "spans": [ + { + "bbox": [ + 74, + 494, + 542, + 516 + ], + "type": "text", + "content": "[10] Gary Bradski. The OpenCV library. Dr. Dobb's Journal: Software Tools for the Professional Programmer, 2000. 22" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 521, + 542, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 521, + 542, + 544 + ], + "spans": [ + { + "bbox": [ + 74, + 521, + 542, + 544 + ], + "type": "text", + "content": "[11] Zhaowei Cai and Nuno Vasconcelos. Cascade R-CNN: Delving into high quality object detection. In CVPR, 2018. 
19" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 74, + 550, + 542, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 550, + 542, + 572 + ], + "spans": [ + { + "bbox": [ + 74, + 550, + 542, + 572 + ], + "type": "text", + "content": "[12] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020. 19" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 74, + 578, + 542, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 578, + 542, + 611 + ], + "spans": [ + { + "bbox": [ + 74, + 578, + 542, + 611 + ], + "type": "text", + "content": "[13] Wenhao Chai, Enxin Song, Yilun Du, Chenlin Meng, Vashisht Madhavan, Omer Bar-Tal, Jeng-Neng Hwang, Saining Xie, and Christopher D. Manning. AuroraCap: Efficient, performant video detailed captioning and a new benchmark. In ICLR, 2025. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 74, + 616, + 542, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 616, + 542, + 650 + ], + "spans": [ + { + "bbox": [ + 74, + 616, + 542, + 650 + ], + "type": "text", + "content": "[14] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. Hybrid task cascade for instance segmentation. In CVPR, 2019. 19" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 74, + 655, + 541, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 655, + 541, + 678 + ], + "spans": [ + { + "bbox": [ + 74, + 655, + 541, + 678 + ], + "type": "text", + "content": "[15] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pretraining from pixels. In ICML, 2020. 
20" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 74, + 683, + 541, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 683, + 541, + 706 + ], + "spans": [ + { + "bbox": [ + 74, + 683, + 541, + 706 + ], + "type": "text", + "content": "[16] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020. 20" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 72, + 64, + 542, + 717 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "spans": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "type": "text", + "content": "[17] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, Alexander Kolesnikov, Joan Puigcerver, Nan Ding, Keran Rong, Hassan Akbari, Gaurav Mishra, Linting Xue, Ashish Thapliyal, James Bradbury, Weicheng Kuo, Mojtaba Seyedhosseini, Chao Jia, Burcu Karagol Ayan, Carlos Riquelme, Andreas Steiner, Anelia Angelova, Xiaohua Zhai, Neil Houlsby, and Radu Soricut. Pali: A jointly-scaled multilingual language-image model. In ICLR, 2023. 
8, 9" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 125, + 542, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 125, + 542, + 203 + ], + "spans": [ + { + "bbox": [ + 73, + 125, + 542, + 203 + ], + "type": "text", + "content": "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, Lixin Gu, Xuehui Wang, Qingyun Li, Yimin Ren, Zixuan Chen, Jiapeng Luo, Jiahao Wang, Tan Jiang, Bo Wang, Conghui He, Botian Shi, Xingcheng Zhang, Han Lv, Yi Wang, Wenqi Shao, Pei Chu, Zhongying Tu, Tong He, Zhiyong Wu, Huipeng Deng, Jiaye Ge, Kai Chen, Kaipeng Zhang, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv:2412.05271, 2024. 11, 15, 16, 20, 32, 33" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 208, + 542, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 208, + 542, + 241 + ], + "spans": [ + { + "bbox": [ + 73, + 208, + 542, + 241 + ], + "type": "text", + "content": "[19] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyuan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In CVPR, 2024. 1, 6, 7, 9, 10, 20, 26" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 246, + 542, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 246, + 542, + 270 + ], + "spans": [ + { + "bbox": [ + 73, + 246, + 542, + 270 + ], + "type": "text", + "content": "[20] Gong Cheng, Junwei Han, and Xiaoqiang Lu. Remote sensing image scene classification: Benchmark and state of the art. Proceedings of the IEEE, 2017. 
9" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 274, + 542, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 274, + 542, + 341 + ], + "spans": [ + { + "bbox": [ + 73, + 274, + 542, + 341 + ], + "type": "text", + "content": "[21] Jang Hyun Cho, Andrea Madotto, Effrosyni Mavroudi, Triantafyllos Afouras, Tushar Nagarajan, Muhammad Maaz, Yale Song, Tengyu Ma, Shuming Hu, Hanoona Rasheed, Peize Sun, Po-Yao Huang, Daniel Bolya, Suyog Jain, Miguel Martin, Huiyu Wang, Nikhila Ravi, Shashank Jain, Temmy Stark, Shane Moon, Babak Damavandi, Vivian Lee, Andrew Westbury, Salman Khan, Philipp Krahenbuhl, Piotr Dólar, Lorenzo Torresani, Kristen Grauman, and Christoph Feichtenhofer. Perceptionlm: Open-access data and models for detailed visual understanding. arXiv:2504.13180, 2025. 2, 5, 11, 14, 15, 16, 21" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 346, + 542, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 346, + 542, + 369 + ], + "spans": [ + { + "bbox": [ + 73, + 346, + 542, + 369 + ], + "type": "text", + "content": "[22] Seokju Cho, Heeseong Shin, Sunghwan Hong, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. CAT-Seg: Cost aggregation for open-vocabulary semantic segmentation. In CVPR, 2024. 20" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 374, + 542, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 374, + 542, + 396 + ], + "spans": [ + { + "bbox": [ + 73, + 374, + 542, + 396 + ], + "type": "text", + "content": "[23] Timothee Darcet, Maxime Oquab, Julien Mairal, and Piotr Bojanowski. Vision transformers need registers. In ICLR, 2024. 
12, 17" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 402, + 542, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 402, + 542, + 490 + ], + "spans": [ + { + "bbox": [ + 73, + 402, + 542, + 490 + ], + "type": "text", + "content": "[24] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, Rodolphe Jenatton, Lucas Beyer, Michael Tschannen, Anurag Arnab, Xiao Wang, Carlos Riquelme, Matthias Minderer, Joan Puigcerver, Utku Evci, Manoj Kumar, Sjoerd van Steenkiste, Gamaleldin F. Elsayed, Aravindh Mahendran, Fisher Yu, Avital Oliver, Fantine Huot, Jasmijn Bastings, Mark Patrick Collier, Alexey Gritsenko, Vighnesh Birodkar, Cristina Vasconcelos, Yi Tay, Thomas Mensink, Alexander Kolesnikov, Filip Pavetic, Dustin Tran, Thomas Kipf, Mario Lučić, Xiaohua Zhai, Daniel Keysers, Jeremiah Harmsen, and Neil Houlsby. Scaling vision transformers to 22 billion parameters. In ICML, 2023. 1, 9" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 495, + 542, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 495, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 72, + 495, + 542, + 594 + ], + "type": "text", + "content": "[25] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, Jiasen Lu, Taira Anderson, Erin Bransom, Kiana Ehsani, Huong Ngo, YenSung Chen, Ajay Patel, Mark Yatskar, Chris Callison-Burch, Andrew Head, Rose Hendrix, Favyen Bastani, Eli VanderBilt, Nathan Lambert, Yvonne Chou, Arnavi Chheda, Jenna Sparks, Sam Skjonsberg, Michael Schmitz, Aaron Sarnat, Byron Bischoff, Pete Walsh, Chris Newell, Piper Wolters, Tanmay Gupta, Kuo-Hao Zeng, Jon Borchardt, Dirk Groeneveld, Crystal Nam, Sophie Lebrecht, Caitlin Wittlif, Carissa Schoenick, Oscar Michel, Ranjay Krishna, Luca Weihs, Noah A. 
Smith, Hannaneh Hajishirzi, Ross Girshick, Ali Farhadi, and Aniruddha Kembhavi. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv:2409.17146, 2024. 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 600, + 542, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 600, + 542, + 623 + ], + "spans": [ + { + "bbox": [ + 73, + 600, + 542, + 623 + ], + "type": "text", + "content": "[26] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009. 1, 3, 6, 8, 9, 10, 30, 31, 32" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 628, + 542, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 628, + 542, + 651 + ], + "spans": [ + { + "bbox": [ + 73, + 628, + 542, + 651 + ], + "type": "text", + "content": "[27] Karan Desai and Justin Johnson. VirTex: Learning visual representations from textual annotations. In CVPR, 2021. 20" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 73, + 656, + 542, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 656, + 542, + 677 + ], + "spans": [ + { + "bbox": [ + 73, + 656, + 542, + 677 + ], + "type": "text", + "content": "[28] Jian Ding, Nan Xue, Gui-Song Xia, and Dengxin Dai. Decoupling zero-shot semantic segmentation. In CVPR, 2022. 20" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 73, + 684, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 684, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 73, + 684, + 542, + 717 + ], + "type": "text", + "content": "[29] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2020. 
1, 8, 9" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 699 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "spans": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "type": "text", + "content": "[30] Alaaeldin El-Nouby, Michal Klein, Shuangfei Zhai, Miguel Angel Bautista, Alexander Toshev, Vaishaal Shankar, Joshua M Susskind, and Armand Joulin. Scalable pre-training of large autoregressive image models. In ICML, 2024. 20" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 103, + 542, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 103, + 542, + 138 + ], + "spans": [ + { + "bbox": [ + 73, + 103, + 542, + 138 + ], + "type": "text", + "content": "[31] David Fan, Shengbang Tong, Jiachen Zhu, Koustuv Sinha, Zhuang Liu, Xinlei Chen, Michael Rabbat, Nicolas Ballas, Yann LeCun, Amir Bar, and Saining Xie. Scaling language-free visual representation learning. arXiv:2504.01017, 2025. 12, 13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 142, + 542, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 142, + 542, + 166 + ], + "spans": [ + { + "bbox": [ + 73, + 142, + 542, + 166 + ], + "type": "text", + "content": "[32] Lijie Fan, Dilip Krishnan, Phillip Isola, Dina Katabi, and Yonglong Tian. Improving CLIP training with language rewrites. In NeurIPS, 2023. 
20" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 170, + 542, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 170, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 73, + 170, + 542, + 194 + ], + "type": "text", + "content": "[33] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. In ICLR, 2024. 1, 3, 9, 16, 20, 26" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 198, + 542, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 198, + 542, + 222 + ], + "spans": [ + { + "bbox": [ + 73, + 198, + 542, + 222 + ], + "type": "text", + "content": "[34] Yuxin Fang, Wen Wang, Binhui Xie, Quan Sun, Ledell Wu, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA: Exploring the limits of masked visual representation learning at scale. In CVPR, 2023. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 225, + 542, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 225, + 542, + 250 + ], + "spans": [ + { + "bbox": [ + 73, + 225, + 542, + 250 + ], + "type": "text", + "content": "[35] Yuxin Fang, Quan Sun, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA-02: A visual representation for neon genesis. Image and Vision Computing, 2024. 1, 19" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 253, + 542, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 253, + 542, + 267 + ], + "spans": [ + { + "bbox": [ + 73, + 253, + 542, + 267 + ], + "type": "text", + "content": "[36] Christoph Feichtenhofer. X3D: Expanding architectures for efficient video recognition. In CVPR, 2020. 
4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 270, + 542, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 270, + 542, + 316 + ], + "spans": [ + { + "bbox": [ + 73, + 270, + 542, + 316 + ], + "type": "text", + "content": "[37] Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Béthune, Zhe Gan, Alexander T. Toshev, Marcin Eichner, Moin Nabi, Yinfei Yang, Joshua M. Susskind, and Alaaeldin El-Nouby. Multimodal autoregressive pre-training of large vision encoders. In CVPR, 2025. 1, 2, 10, 11, 15, 16, 19, 20, 32, 33" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 319, + 542, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 319, + 542, + 365 + ], + "spans": [ + { + "bbox": [ + 73, + 319, + 542, + 365 + ], + "type": "text", + "content": "[38] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-MME: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv:2405.21075, 2024. 
14, 15, 16, 32" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 369, + 542, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 369, + 542, + 438 + ], + "spans": [ + { + "bbox": [ + 73, + 369, + 542, + 438 + ], + "type": "text", + "content": "[39] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, Eyal Orgad, Rahim Entezari, Giannis Daras, Sarah Pratt, Vivek Ramanujan, Yonatan Bitton, Kalyani Marathe, Stephen Mussmann, Richard Vencu, Mehdi Cherti, Ranjay Krishna, Pang Wei Koh, Olga Saukh, Alexander Ratner, Shuran Song, Hannaneh Hajishirzi, Ali Farhadi, Romain Beaumont, Sewoong Oh, Alex Dimakis, Jenia Jitsev, Yair Carmon, Vaishaal Shankar, and Ludwig Schmidt. DataComp: In search of the next generation of multimodal datasets. In NeurIPS, 2023. 10, 20" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 441, + 542, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 441, + 542, + 466 + ], + "spans": [ + { + "bbox": [ + 73, + 441, + 542, + 466 + ], + "type": "text", + "content": "[40] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017. 14, 15, 16, 32" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 73, + 469, + 542, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 469, + 542, + 494 + ], + "spans": [ + { + "bbox": [ + 73, + 469, + 542, + 494 + ], + "type": "text", + "content": "[41] Agrim Gupta, Piotr Dollar, and Ross Girshick. LVIS: A dataset for large vocabulary instance segmentation. In CVPR, 2019. 
19" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 73, + 498, + 542, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 498, + 542, + 521 + ], + "spans": [ + { + "bbox": [ + 73, + 498, + 542, + 521 + ], + "type": "text", + "content": "[42] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 73, + 525, + 542, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 525, + 542, + 539 + ], + "spans": [ + { + "bbox": [ + 73, + 525, + 542, + 539 + ], + "type": "text", + "content": "[43] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask R-CNN. In ICCV, 2017. 11, 12, 19, 29" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 73, + 542, + 542, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 542, + 542, + 567 + ], + "spans": [ + { + "bbox": [ + 73, + 542, + 542, + 567 + ], + "type": "text", + "content": "[44] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022. 1, 19" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 73, + 570, + 542, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 570, + 542, + 604 + ], + "spans": [ + { + "bbox": [ + 73, + 570, + 542, + 604 + ], + "type": "text", + "content": "[45] Greg Heinrich, Mike Ranzinger, Hongxu, Yin, Yao Lu, Jan Kautz, Andrew Tao, Bryan Catanzaro, and Pavlo Molchanov. RADIOv2.5: Improved baselines for agglomerative vision foundation models. In CVPR, 2025. 
1, 10, 18" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 73, + 609, + 542, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 609, + 542, + 643 + ], + "spans": [ + { + "bbox": [ + 73, + 609, + 542, + 643 + ], + "type": "text", + "content": "[46] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In ICCV, 2021. 3, 8, 9, 30, 31" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 73, + 647, + 542, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 647, + 542, + 672 + ], + "spans": [ + { + "bbox": [ + 73, + 647, + 542, + 672 + ], + "type": "text", + "content": "[47] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In CVPR, 2021. 3, 4, 8, 9, 30, 31, 32" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 73, + 676, + 542, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 676, + 542, + 699 + ], + "spans": [ + { + "bbox": [ + 73, + 676, + 542, + 699 + ], + "type": "text", + "content": "[48] Byeongho Heo, Song Park, Dongyoon Han, and Sangdoo Yun. Rotary position embedding for vision transformer. In ECCV, 2024. 
20" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 693 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 73, + 64, + 541, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 64, + 541, + 87 + ], + "spans": [ + { + "bbox": [ + 73, + 64, + 541, + 87 + ], + "type": "text", + "content": "[49] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. In NeurIPS Deep Learning Workshop, 2015. 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 91, + 542, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 91, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 73, + 91, + 542, + 114 + ], + "type": "text", + "content": "[50] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, 2016. 14, 17" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 120, + 542, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 120, + 542, + 153 + ], + "spans": [ + { + "bbox": [ + 73, + 120, + 542, + 153 + ], + "type": "text", + "content": "[51] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. OpenCLIP, 2021. 
3, 20" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 159, + 541, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 159, + 541, + 182 + ], + "spans": [ + { + "bbox": [ + 73, + 159, + 541, + 182 + ], + "type": "text", + "content": "[52] Allan Jabri, Andrew Owens, and Alexei Efros. Space-time correspondence as a contrastive random walk. In NeurIPS, 2020. 11, 19, 29" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 186, + 541, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 186, + 541, + 209 + ], + "spans": [ + { + "bbox": [ + 73, + 186, + 541, + 209 + ], + "type": "text", + "content": "[53] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. TGIF-QA: Toward spatio-temporal reasoning in visual question answering. In CVPR, 2017. 14, 15, 16, 32" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 215, + 541, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 215, + 541, + 248 + ], + "spans": [ + { + "bbox": [ + 73, + 215, + 541, + 248 + ], + "type": "text", + "content": "[54] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, 2021. 1, 20" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 254, + 541, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 254, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 73, + 254, + 541, + 287 + ], + "type": "text", + "content": "[55] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset. arXiv:1705.06950, 2017. 
6, 9, 31, 32" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 293, + 541, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 293, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 73, + 293, + 541, + 316 + ], + "type": "text", + "content": "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 14, 15, 16, 32, 33" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 320, + 541, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 320, + 541, + 343 + ], + "spans": [ + { + "bbox": [ + 73, + 320, + 541, + 343 + ], + "type": "text", + "content": "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In ECCV, 2016. 14, 15, 16, 32" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 349, + 542, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 349, + 542, + 381 + ], + "spans": [ + { + "bbox": [ + 73, + 349, + 542, + 381 + ], + "type": "text", + "content": "[58] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dolkar, and Ross Girshick. Segment anything. In ICCV, 2023. 5, 18" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 387, + 541, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 387, + 541, + 410 + ], + "spans": [ + { + "bbox": [ + 73, + 387, + 541, + 410 + ], + "type": "text", + "content": "[59] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV Workshop, 2013. 
9" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 73, + 415, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 415, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 73, + 415, + 541, + 449 + ], + "type": "text", + "content": "[60] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 2017. 27, 32" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 73, + 454, + 541, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 454, + 541, + 476 + ], + "spans": [ + { + "bbox": [ + 73, + 454, + 541, + 476 + ], + "type": "text", + "content": "[61] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 73, + 482, + 541, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 482, + 541, + 505 + ], + "spans": [ + { + "bbox": [ + 73, + 482, + 541, + 505 + ], + "type": "text", + "content": "[62] Hildegard Kuehne, Hueihan Jhuang, Estfbaliz Garrote, Tomaso Poggio, and Thomas Serre. HMDB: a large video database for human motion recognition. In ICCV, 2011. 9, 31, 32" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 73, + 510, + 541, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 510, + 541, + 533 + ], + "spans": [ + { + "bbox": [ + 73, + 510, + 541, + 533 + ], + "type": "text", + "content": "[63] Weicheng Kuo, Yin Cui, Xiuye Gu, A. J. Piergiovanni, and Anelia Angelova. F-VLM: open-vocabulary object detection upon frozen vision and language models. In ICLR, 2023. 
20" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 73, + 537, + 541, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 537, + 541, + 571 + ], + "spans": [ + { + "bbox": [ + 73, + 537, + 541, + 571 + ], + "type": "text", + "content": "[64] Zhengfeng Lai, Haotian Zhang, Bowen Zhang, Wentao Wu, Haoping Bai, Aleksei Timofeev, Xianzhi Du, Zhe Gan, Jiulong Shan, Chen-Nee Chuah, Yinfei Yang, and Meng Cao. VeCLIP: Improving CLIP training via visual-enriched captions. In ECCV, 2024. 5, 20" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 73, + 576, + 541, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 576, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 73, + 576, + 541, + 599 + ], + "type": "text", + "content": "[65] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models? In NeurIPS, 2024. 27" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 73, + 605, + 541, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 605, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 73, + 605, + 541, + 627 + ], + "type": "text", + "content": "[66] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. LLaVA-OneVision: Easy visual task transfer. TMLR, 2025. 16, 20, 22" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 73, + 632, + 541, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 632, + 541, + 655 + ], + "spans": [ + { + "bbox": [ + 73, + 632, + 541, + 655 + ], + "type": "text", + "content": "[67] Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In ICCV, 2023. 
9" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 73, + 660, + 541, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 660, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 73, + 660, + 541, + 693 + ], + "type": "text", + "content": "[68] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, Limin Wang, and Yu Qiao. MVBench: A comprehensive multi-modal video understanding benchmark. In CVPR, 2024. 14, 15, 16, 32" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 72, + 64, + 542, + 718 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "spans": [ + { + "bbox": [ + 73, + 64, + 542, + 99 + ], + "type": "text", + "content": "[69] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, 2022. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 72, + 103, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 103, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 72, + 103, + 524, + 116 + ], + "type": "text", + "content": "[70] Xianhang Li, Zeyu Wang, and Cihang Xie. An inverse scaling law for CLIP training. In NeurIPS, 2023. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 120, + 541, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 120, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 73, + 120, + 541, + 144 + ], + "type": "text", + "content": "[71] Xianhang Li, Zeyu Wang, and Cihang Xie. CLIPA-v2: Scaling CLIP training with 81.1% zero-shot imagenet accuracy within a $10,000 budget; an extra $4,000 unlocks 81.8% accuracy. arXiv:2306.15658, 2023. 3, 20" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 148, + 541, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 148, + 541, + 171 + ], + "spans": [ + { + "bbox": [ + 73, + 148, + 541, + 171 + ], + "type": "text", + "content": "[72] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. In ECCV, 2022. 11, 19, 29" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 176, + 541, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 176, + 541, + 199 + ], + "spans": [ + { + "bbox": [ + 73, + 176, + 541, + 199 + ], + "type": "text", + "content": "[73] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, 2023. 14, 15, 16, 32" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 204, + 541, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 204, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 73, + 204, + 541, + 228 + ], + "type": "text", + "content": "[74] Yanghao Li, Haoqi Fan, Ronghang Hu, Christoph Feichtenhofer, and Kaiming He. Scaling language-image pre-training via masking. In CVPR, 2023. 
20" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 232, + 541, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 232, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 73, + 232, + 541, + 255 + ], + "type": "text", + "content": "[75] Zhenyu Li, Xuyang Wang, Xianming Liu, and Junjun Jiang. Binsformer: Revisiting adaptive bins for monocular depth estimation. TIP, 2024. 29" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 259, + 541, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 259, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 73, + 259, + 541, + 293 + ], + "type": "text", + "content": "[76] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In ECCV, 2014. 2, 6, 9, 12, 14, 15, 16, 19, 27, 31, 32" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 298, + 541, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 298, + 541, + 322 + ], + "spans": [ + { + "bbox": [ + 73, + 298, + 541, + 322 + ], + "type": "text", + "content": "[77] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning,OCR, and world knowledge, 2024. 32, 33" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 327, + 541, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 327, + 541, + 339 + ], + "spans": [ + { + "bbox": [ + 73, + 327, + 541, + 339 + ], + "type": "text", + "content": "[78] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. NeurIPS, 2024. 
20, 23" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 344, + 541, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 344, + 541, + 367 + ], + "spans": [ + { + "bbox": [ + 73, + 344, + 541, + 367 + ], + "type": "text", + "content": "[79] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021. 3, 19" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 73, + 371, + 541, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 371, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 73, + 371, + 541, + 395 + ], + "type": "text", + "content": "[80] Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, and Baining Guo. Swin transformer v2: Scaling up capacity and resolution. In CVPR, 2022. 19" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 73, + 399, + 541, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 399, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 73, + 399, + 541, + 422 + ], + "type": "text", + "content": "[81] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In CVPR, 2022. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 73, + 426, + 512, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 426, + 512, + 439 + ], + "spans": [ + { + "bbox": [ + 73, + 426, + 512, + 439 + ], + "type": "text", + "content": "[82] AI @ Meta Llama Team. The llama 3 herd of models. arXiv:2407.21783, 2024. 
5, 14, 15, 16, 20, 32, 33" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 73, + 444, + 471, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 444, + 471, + 456 + ], + "spans": [ + { + "bbox": [ + 73, + 444, + 471, + 456 + ], + "type": "text", + "content": "[83] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. *ICLR*, 2019. 3, 29" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 73, + 460, + 541, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 460, + 541, + 484 + ], + "spans": [ + { + "bbox": [ + 73, + 460, + 541, + 484 + ], + "type": "text", + "content": "[84] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. CLIP4Clip: An empirical study of clip for end to end video clip retrieval. Neurocomputing, 2021. 6, 9" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 73, + 489, + 541, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 489, + 541, + 512 + ], + "spans": [ + { + "bbox": [ + 73, + 489, + 541, + 512 + ], + "type": "text", + "content": "[85] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. SiT: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In ECCV, 2024. 20" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 73, + 517, + 541, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 517, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 73, + 517, + 541, + 540 + ], + "type": "text", + "content": "[86] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-ChatGPT: Towards detailed video understanding via large vision and language models. In ACL, 2024. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 73, + 544, + 541, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 544, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 73, + 544, + 541, + 567 + ], + "type": "text", + "content": "[87] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. VideoGPT+: Integrating image and video encoders for enhanced video understanding. arXiv:2406.09418, 2024. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 73, + 572, + 541, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 572, + 541, + 595 + ], + "spans": [ + { + "bbox": [ + 73, + 572, + 541, + 595 + ], + "type": "text", + "content": "[88] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arxiv:1306.5151, 2013. 9" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 73, + 600, + 541, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 600, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 73, + 600, + 541, + 624 + ], + "type": "text", + "content": "[89] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. NeurIPS, 2024. 14, 15, 16, 32" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 73, + 628, + 541, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 628, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 73, + 628, + 541, + 662 + ], + "type": "text", + "content": "[90] Kevis-Kokitsi Maninis, Kaifeng Chen, Soham Ghosh, Arjun Karpur, Koert Chen, Ye Xia, Bingyi Cao, Daniel Salz, Guangxing Han, Jan Dlabal, et al. Tips: Text-image pretraining with spatial awareness. arXiv:2410.16512, 2024. 
1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 73, + 667, + 541, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 667, + 541, + 690 + ], + "spans": [ + { + "bbox": [ + 73, + 667, + 541, + 690 + ], + "type": "text", + "content": "[91] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. DocVQA: A dataset for vqa on document images. In WACV, 2021. 14, 15, 16, 32" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 73, + 695, + 541, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 695, + 541, + 718 + ], + "spans": [ + { + "bbox": [ + 73, + 695, + 541, + 718 + ], + "type": "text", + "content": "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographics. In WACV, 2022. 14, 15, 16, 32" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 542, + 704 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "spans": [ + { + "bbox": [ + 73, + 64, + 542, + 120 + ], + "type": "text", + "content": "[93] Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, Anton Belyi, Haotian Zhang, Karanjeet Singh, Doug Kang, Ankur Jain, Hongyu He, Max Schwarzer, Tom Gunter, Xiang Kong, Aonan Zhang, Jianyu Wang, Chong Wang, Nan Du, Tao Lei, Sam Wiseman, Guoli Yin, Mark Lee, Zirui Wang, 
Ruoming Pang, Peter Grasch, Alexander Toshev, and Yinfei Yang. MM1: methods, analysis and insights from multimodal LLM pre-training. In ECCV, 2024. 20" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 72, + 125, + 542, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 125, + 542, + 159 + ], + "spans": [ + { + "bbox": [ + 72, + 125, + 542, + 159 + ], + "type": "text", + "content": "[94] Matthias Minderer, Alexey A. Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. Simple open-vocabulary object detection with vision transformers. In ECCV, 2022. 1, 20" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 163, + 542, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 163, + 542, + 186 + ], + "spans": [ + { + "bbox": [ + 73, + 163, + 542, + 186 + ], + "type": "text", + "content": "[95] Matthias Minderer, Alexey Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In NeurIPS, 2023. 20" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 191, + 541, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 191, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 73, + 191, + 541, + 215 + ], + "type": "text", + "content": "[96] Thao Nguyen, Samir Yitzhak Gadre, Gabriel Ilharco, Sewoong Oh, and Ludwig Schmidt. Improving multimodal datasets with image captioning. In NeurIPS, 2023. 5, 20" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 220, + 541, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 220, + 541, + 242 + ], + "spans": [ + { + "bbox": [ + 73, + 220, + 541, + 242 + ], + "type": "text", + "content": "[97] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In ICVGIP, 2008. 
9" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 247, + 542, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 247, + 542, + 303 + ], + "spans": [ + { + "bbox": [ + 73, + 247, + 542, + 303 + ], + "type": "text", + "content": "[98] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Mido Assran, Nicolas Ballas, Wojciech Galuba, Russell Howes, Po-Yao Huang, Shang-Wen Li, Ishan Misra, Michael Rabbat, Vasu Sharma, Gabriel Synnaeve, Hu Xu, Hervé Jégou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision. TMLR, 2024. 1, 2, 10, 11, 15, 16, 18, 19, 20, 22, 29, 33" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 308, + 542, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 308, + 542, + 331 + ], + "spans": [ + { + "bbox": [ + 73, + 308, + 542, + 331 + ], + "type": "text", + "content": "[99] Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, and Philipp Krahenbuhl. NMSstrikes back. arXiv:2212.06137, 2022. 19" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 336, + 541, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 336, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 69, + 336, + 541, + 348 + ], + "type": "text", + "content": "[100] Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In CVPR, 2012. 9" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 353, + 541, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 353, + 541, + 376 + ], + "spans": [ + { + "bbox": [ + 69, + 353, + 541, + 376 + ], + "type": "text", + "content": "[101] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. 
Kosmos-2: Grounding multimodal large language models to the world. arXiv:2306.14824, 2023. 20" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 381, + 541, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 381, + 541, + 415 + ], + "spans": [ + { + "bbox": [ + 69, + 381, + 541, + 415 + ], + "type": "text", + "content": "[102] Hieu Pham, Zihang Dai, Golnaz Ghiasi, Kenji Kawaguchi, Hanxiao Liu, Adams Wei Yu, Jiahui Yu, Yi-Ting Chen, Minh-Thang Luong, Yonghui Wu, Mingxing Tan, and Quoc V. Le. Combined scaling for zero-shot transfer learning. Neurocomputing, 2023. 1, 9, 20" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 419, + 541, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 419, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 541, + 453 + ], + "type": "text", + "content": "[103] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV, 2015. 27, 32" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 459, + 541, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 459, + 541, + 482 + ], + "spans": [ + { + "bbox": [ + 69, + 459, + 541, + 482 + ], + "type": "text", + "content": "[104] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 DAVIS challenge on video object segmentation. arXiv:1704.00675, 2017. 
19, 29" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 487, + 541, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 487, + 541, + 542 + ], + "spans": [ + { + "bbox": [ + 69, + 487, + 541, + 542 + ], + "type": "text", + "content": "[105] Viorica Pătrăucean, Lucas Smaira, Ankush Gupta, Adriâ Recasens Continente, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Joseph Heyward, Mateusz Malinowski, Yi Yang, Carl Doersch, Tatiana Matejovicova, Yury Sulsky, Antoine Miech, Alex Frechette, Hanna Klimczak, Raphael Koster, Junlin Zhang, Stephanie Winkler, Yusuf Aytar, Simon Osindero, Dima Damen, Andrew Zisserman, and João Carreira. Perception test: A diagnostic benchmark for multimodal video models. In NeurIPS, 2024. 14, 15, 16, 32" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 547, + 541, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 547, + 541, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 547, + 541, + 582 + ], + "type": "text", + "content": "[106] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 3, 8, 9, 15, 16, 19, 20, 31, 32, 33" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 586, + 541, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 586, + 541, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 586, + 541, + 619 + ], + "type": "text", + "content": "[107] Jathushan Rajasegaran, Ilija Radosavovic, Rahul Ravishankar, Yossi Gandelsman, Christoph Feichtenhofer, and Jitendra Malik. An empirical study of autoregressive pre-training from videos. arXiv:2501.05453, 2025. 
19, 20, 29" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 625, + 541, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 625, + 541, + 648 + ], + "spans": [ + { + "bbox": [ + 69, + 625, + 541, + 648 + ], + "type": "text", + "content": "[108] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv:2204.06125, 2022. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 652, + 541, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 652, + 541, + 676 + ], + "spans": [ + { + "bbox": [ + 69, + 652, + 541, + 676 + ], + "type": "text", + "content": "[109] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In ICCV, 2021, 11, 19, 29" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 681, + 541, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 681, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 69, + 681, + 541, + 704 + ], + "type": "text", + "content": "[110] Mike Ranzinger, Greg Heinrich, Jan Kautz, and Pavlo Molchanov. AM-RADIO: Agglomerative vision foundation model—reduce all domains into one. In CVPR, 2024. 
1, 18, 21" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 541, + 717 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 109 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 109 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 109 + ], + "type": "text", + "content": "[111] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dólar, and Christoph Feichtenhofer. SAM 2: Segment anything in images and videos. In ICLR, 2024. 2, 5, 17, 18, 34" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 114, + 541, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 114, + 541, + 137 + ], + "spans": [ + { + "bbox": [ + 67, + 114, + 541, + 137 + ], + "type": "text", + "content": "[112] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. DoImagenet classifiers generalize toImagenet? In ICML, 2019. 3, 6, 8, 9, 30, 31, 32" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 142, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 142, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 69, + 142, + 541, + 175 + ], + "type": "text", + "content": "[113] William A. 
Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. The dollar street dataset: images representing the geographic and socioeconomic diversity of the world. In NeurIPS Datasets and Benchmarks, 2022. 10" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 180, + 541, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 180, + 541, + 204 + ], + "spans": [ + { + "bbox": [ + 69, + 180, + 541, + 204 + ], + "type": "text", + "content": "[114] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2022. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 209, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 209, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 69, + 209, + 541, + 232 + ], + "type": "text", + "content": "[115] Mert Bulent Sariyildiz, Julien Perez, and Diane Larlus. Learning visual representations with caption annotations. In ECCV, 2020. 20" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 237, + 541, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 237, + 541, + 259 + ], + "spans": [ + { + "bbox": [ + 69, + 237, + 541, + 259 + ], + "type": "text", + "content": "[116] Mert Bulent Sariyildiz, Philippe Weinzaepfel, Thomas Lucas, Diane Larlus, and Yannis Kalantidis. UNIC: Universal classification models via multi-teacher distillation. In ECCV, 2024. 
18" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 264, + 541, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 264, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 69, + 264, + 541, + 309 + ], + "type": "text", + "content": "[117] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade W Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa R Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5b: An open large-scale dataset for training next generation image-text models. In NeurIPS Datasets and Benchmarks, 2022. 20" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 314, + 541, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 314, + 541, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 314, + 541, + 338 + ], + "type": "text", + "content": "[118] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-OKVQA: A benchmark for visual question answering using world knowledge. In ECCV, 2022. 14, 15, 16, 32" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 342, + 541, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 342, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 342, + 541, + 365 + ], + "type": "text", + "content": "[119] Jinghuan Shang, Karl Schmeckpeper, Brandon B May, Maria Vittoria Minniti, Tarik Kelestemur, David Watkins, and Laura Herlant. Theia: Distilling diverse vision foundation models for robot learning. In CoRL, 2024. 
18" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 370, + 541, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 370, + 541, + 393 + ], + "spans": [ + { + "bbox": [ + 69, + 370, + 541, + 393 + ], + "type": "text", + "content": "[120] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In ICCV, 2019. 19" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 398, + 541, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 398, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 69, + 398, + 541, + 422 + ], + "type": "text", + "content": "[121] Shashank Shekhar, Florian Bordes, Pascal Vincent, and Ari Morcos. Objectives matter: Understanding the impact of self-supervised objectives on vision transformer representations. arXiv:2304.13089, 2023. 20" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 426, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 426, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 541, + 449 + ], + "type": "text", + "content": "[122] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In ECCV, 2020. 10" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 454, + 541, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 454, + 541, + 477 + ], + "spans": [ + { + "bbox": [ + 69, + 454, + 541, + 477 + ], + "type": "text", + "content": "[123] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 
19, 29" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 482, + 541, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 482, + 541, + 505 + ], + "spans": [ + { + "bbox": [ + 69, + 482, + 541, + 505 + ], + "type": "text", + "content": "[124] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 510, + 541, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 510, + 541, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 510, + 541, + 533 + ], + "type": "text", + "content": "[125] Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Devi Parikh, and Marcus Rohrbach. Towards VQA models that can read. In CVPR, 2019. 14, 15, 16, 32" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 537, + 541, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 541, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 541, + 560 + ], + "type": "text", + "content": "[126] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv:1212.0402, 2012. 9, 31, 32" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 565, + 541, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 565, + 541, + 589 + ], + "spans": [ + { + "bbox": [ + 69, + 565, + 541, + 589 + ], + "type": "text", + "content": "[127] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. Neurocomputing, 2024. 
4, 20, 25" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 594, + 541, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 594, + 541, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 594, + 541, + 616 + ], + "type": "text", + "content": "[128] Lin Sun, Jiale Cao, Jin Xie, Xiaoheng Jiang, and Yanwei Pang. CLIPer: Hierarchically improving spatial representation of CLIP for open-vocabulary semantic segmentation. arXiv:2411.13836, 2024. 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 621, + 541, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 621, + 541, + 644 + ], + "spans": [ + { + "bbox": [ + 69, + 621, + 541, + 644 + ], + "type": "text", + "content": "[129] Quan Sun, Yuxin Fang, Ledell Wu, Xinlong Wang, and Yue Cao. EVA-CLIP: Improved training techniques for clip at scale. arXiv:2303.15389, 2023. 20" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 649, + 541, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 649, + 541, + 672 + ], + "spans": [ + { + "bbox": [ + 69, + 649, + 541, + 672 + ], + "type": "text", + "content": "[130] Quan Sun, Jinsheng Wang, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, and Xinlong Wang. EVA-CLIP-18B: Scaling clip to 18 billion parameters. arXiv:2402.04252, 2024. 1, 9, 10, 20, 26" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 677, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 541, + 700 + ], + "type": "text", + "content": "[131] Mingxing Tan and Quoc Le. EfficientNet: Rethinking model scaling for convolutional neural networks. In ICML, 2019. 
1, 3, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 705, + 397, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 705, + 397, + 717 + ], + "spans": [ + { + "bbox": [ + 69, + 705, + 397, + 717 + ], + "type": "text", + "content": "[132] Gemma Team. Gemma 3 technical report. arXiv:2503.19786, 2025. 16, 20" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 704 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "text", + "content": "[133] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. YFCC100M: The new data in multimedia research. Communications of the ACM, 2016. 9" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 92, + 541, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 92, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 69, + 92, + 541, + 125 + ], + "type": "text", + "content": "[134] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, Ziteng Wang, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. In NeurIPS, 2024. 
11, 20" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 131, + 541, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 131, + 541, + 154 + ], + "spans": [ + { + "bbox": [ + 69, + 131, + 541, + 154 + ], + "type": "text", + "content": "[135] Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In ICCV, 2021. 14, 17" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 159, + 498, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 159, + 498, + 171 + ], + "spans": [ + { + "bbox": [ + 69, + 159, + 498, + 171 + ], + "type": "text", + "content": "[136] Hugo Touvron, Matthieu Cord, and Hervé Jégou. DeiT III: Revenge of the ViT. In ECCV, 2022. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 176, + 541, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 176, + 541, + 198 + ], + "spans": [ + { + "bbox": [ + 69, + 176, + 541, + 198 + ], + "type": "text", + "content": "[137] Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. In NeurIPS, 2023. 1, 20" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 204, + 541, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 204, + 541, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 204, + 541, + 248 + ], + "type": "text", + "content": "[138] Michael Tschannen, Alexey Gritsenko, Xiao Wang, Muhammad Ferjad Naeem, Ibrahim Alabdulmohsin, Nikhil Parthasarathy, Talfan Evans, Lucas Beyer, Ye Xia, Basil Mustafa, Olivier Henaff, Jeremiah Harmsen, Andreas Steiner, and Xiaohua Zhai. SigLIP 2: Multilingual vision-language encoders with improved semantic understanding, localization, and dense features. arXiv:2502.14786, 2025. 
2, 7, 8, 9, 10, 15, 16, 18, 19, 26, 32, 33" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 254, + 541, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 254, + 541, + 286 + ], + "spans": [ + { + "bbox": [ + 69, + 254, + 541, + 286 + ], + "type": "text", + "content": "[139] Jack Urbanek, Florian Bordes, Pietro Astolfi, Mary Williamson, Vasu Sharma, and Adriana Romero-Soriano. A picture is worth more than 77 text tokens: Evaluating CLIP-style models on dense captions. In CVPR, 2024. 27, 32" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 293, + 541, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 293, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 69, + 293, + 541, + 316 + ], + "type": "text", + "content": "[140] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In CVPR, 2018. 10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 320, + 541, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 320, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 320, + 541, + 342 + ], + "type": "text", + "content": "[141] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 25" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 349, + 541, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 349, + 541, + 371 + ], + "spans": [ + { + "bbox": [ + 69, + 349, + 541, + 371 + ], + "type": "text", + "content": "[142] Matthew Walmer, Saksham Suri, Kamal Gupta, and Abhinav Shrivastava. Teaching matters: Investigating the role of supervision in vision transformers. In CVPR, 2023. 
20" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 376, + 541, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 376, + 541, + 399 + ], + "spans": [ + { + "bbox": [ + 69, + 376, + 541, + 399 + ], + "type": "text", + "content": "[143] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In NeurIPS, 2019. 3, 8, 9, 30, 31" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 404, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 404, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 404, + 541, + 449 + ], + "type": "text", + "content": "[144] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv:2409.12191, 2024. 16, 20" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 454, + 541, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 454, + 541, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 454, + 541, + 487 + ], + "type": "text", + "content": "[145] Wenhai Wang, Jifeng Dai, Zhe Chen, Zhenhang Huang, Zhiqi Li, Xizhou Zhu, Xiaowei Hu, Tong Lu, Lewei Lu, Hongsheng Li, Xiaogang Wang, and Yu Qiao. InternImage: Exploring large-scale vision foundation models with deformable convolutions. In CVPR, 2023. 
19" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 493, + 541, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 493, + 541, + 526 + ], + "spans": [ + { + "bbox": [ + 69, + 493, + 541, + 526 + ], + "type": "text", + "content": "[146] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, Tianxiang Jiang, Songze Li, Jilan Xu, Hongjie Zhang, Yifei Huang, Yu Qiao, Yali Wang, and Limin Wang. InternVideo2: Scaling foundation models for multimodal video understanding. In ECCV, 2024. 2, 9" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 532, + 541, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 532, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 69, + 532, + 541, + 555 + ], + "type": "text", + "content": "[147] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, 2022. 4, 17" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 559, + 541, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 559, + 541, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 559, + 541, + 582 + ], + "type": "text", + "content": "[148] Bo Wu, Shoubin Yu, Zhenfang Chen, Joshua B Tenenbaum, and Chuang Gan. STAR: A benchmark for situated reasoning in real-world videos. In NeurIPS, 2021. 14, 15, 16, 32" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 588, + 512, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 588, + 512, + 600 + ], + "spans": [ + { + "bbox": [ + 69, + 588, + 512, + 600 + ], + "type": "text", + "content": "[149] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detector2, 2019. 
29" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "type": "text", + "content": "[150] Jianxiong Xiao, Krista A. Ehinger, James Hays, Antonio Torralba, and Aude Oliva. SUN database: Exploring a large collection of scene categories. IJCV, 2014. 9" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 632, + 541, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 632, + 541, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 632, + 541, + 666 + ], + "type": "text", + "content": "[151] Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen tau Yih, Shang-Wen Li, Saining Xie, and Christoph Feichtenhofer. Altogether: Image captioning via re-aligning alt-text. In EMNLP, 2024. 5, 20" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 671, + 541, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 671, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 69, + 671, + 541, + 704 + ], + "type": "text", + "content": "[152] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. In ICLR, 2024. 
1, 3, 8, 15, 19, 20, 32, 33" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 542, + 703 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "text", + "content": "[153] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. MSR-VTT: A large video description dataset for bridging video and language. In CVPR, 2016. 6, 7, 31, 32" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 91, + 542, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 91, + 542, + 180 + ], + "spans": [ + { + "bbox": [ + 69, + 91, + 542, + 180 + ], + "type": "text", + "content": "[154] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. 
arxiv:2407.10671, 2024. 16" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 186, + 542, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 186, + 542, + 252 + ], + "spans": [ + { + "bbox": [ + 69, + 186, + 542, + 252 + ], + "type": "text", + "content": "[155] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv:2412.15115, 2024. 16, 33" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 258, + 541, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 258, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 69, + 258, + 541, + 291 + ], + "type": "text", + "content": "[156] Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In ICLR, 2020. 3, 20" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 297, + 541, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 297, + 541, + 319 + ], + "spans": [ + { + "bbox": [ + 70, + 297, + 541, + 319 + ], + "type": "text", + "content": "[157] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. TACL, 2014. 
9, 14, 15, 16, 32" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 324, + 541, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 541, + 347 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 541, + 347 + ], + "type": "text", + "content": "[158] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. CoCa: Contrastive captioners are image-text foundation models. TMLR, 2022. 1, 9, 20" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 353, + 541, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 353, + 541, + 385 + ], + "spans": [ + { + "bbox": [ + 69, + 353, + 541, + 385 + ], + "type": "text", + "content": "[159] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. In ICLR, 2025, 20, 21" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 392, + 541, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 392, + 541, + 414 + ], + "spans": [ + { + "bbox": [ + 70, + 392, + 541, + 414 + ], + "type": "text", + "content": "[160] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. 1, 4, 7, 9, 16, 19, 20, 22, 25, 26, 30" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 419, + 541, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 419, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 70, + 419, + 541, + 441 + ], + "type": "text", + "content": "[161] Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M Ni, and Heung-Yeung Shum. DINO: DETR with improved denoising anchor boxes for end-to-end object detection. In ICLR, 2023. 
19" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 447, + 492, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 447, + 492, + 459 + ], + "spans": [ + { + "bbox": [ + 69, + 447, + 492, + 459 + ], + "type": "text", + "content": "[162] Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, 2016. 20" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 464, + 541, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 464, + 541, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 464, + 541, + 487 + ], + "type": "text", + "content": "[163] Yuhao Zhang, Hang Jiang, Yasuhide Miura, Christopher D. Manning, and Curtis P. Langlotz. Contrastive learning of medical visual representations from paired images and text. In MLHC, 2022. 20" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 492, + 541, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 492, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 69, + 492, + 541, + 536 + ], + "type": "text", + "content": "[164] Long Zhao, Nitesh Bharadwaj Gundavarapu, Liangzhe Yuan, Hao Zhou, Shen Yan, Jennifer J. Sun, Luke Friedman, Rui Qian, Tobias Weyand, Yue Zhao, Rachel Hornung, Florian Schroff, Ming Yang, David A. Ross, Huisheng Wang, Hartwig Adam, Mikhail Sirotenko, Ting Liu, and Boqing Gong. VideoPrism: A foundational visual encoder for video understanding. In ICML, 2024. 9" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 542, + 541, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 542, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 69, + 542, + 541, + 564 + ], + "type": "text", + "content": "[165] Hanwen Zheng, Sijia Wang, Chris Thomas, and Lifu Huang. Advancing chart question answering with robust chart component recognition. In WACV, 2025. 
14, 15, 16, 32" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 570, + 541, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 570, + 541, + 592 + ], + "spans": [ + { + "bbox": [ + 69, + 570, + 541, + 592 + ], + "type": "text", + "content": "[166] Liang Zheng, Yali Zhao, Shengjin Wang, Jingdong Wang, and Qi Tian. Good practice in cnn feature transfer. arXiv:1604.00133, 2016. 20" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "type": "text", + "content": "[167] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In CVPR, 2017. 19, 29" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 625, + 541, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 625, + 541, + 703 + ], + "spans": [ + { + "bbox": [ + 69, + 625, + 541, + 703 + ], + "type": "text", + "content": "[168] Jinguo Zhu, Weiyun Wang, Zhe Chen, Zhaoyang Liu, Shenglong Ye, Lixin Gu, Yuchen Duan, Hao Tian, Weijie Su, Jie Shao, Zhangwei Gao, Erfei Cui, Yue Cao, Yangzhou Liu, Weiye Xu, Hao Li, Jiahao Wang, Han Lv, Dengnian Chen, Songze Li, Yinan He, Tan Jiang, Jiapeng Luo, Yi Wang, Conghui He, Botian Shi, Xingcheng Zhang, Wenqi Shao, Junjun He, Yingtong Xiong, Wenwen Qu, Peng Sun, Penglong Jiao, Lijun Wu, Kaipeng Zhang, Huipeng Deng, Jiaye Ge, Kai Chen, Limin Wang, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. InternVL3: Exploring advanced training and test-time recipes for open-source multimodal models. arxiv:2504.10479, 2025. 
2, 16" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 86 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 86 + ], + "type": "text", + "content": "[169] Zhuofan Zong, Guanglu Song, and Yu Liu. DETRs with collaborative hybrid assignments training. In ICCV, 2023. 19" + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_content_list.json b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5f160083d1b22e4a529d7100cd0a1567d55e39de --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_content_list.json @@ -0,0 +1,2615 @@ +[ + { + "type": "text", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "text_level": 1, + "bbox": [ + 116, + 109, + 854, + 131 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mehmet Hamza Erol* 1 Batu El* 1 Mirac Suzgun* 1 
Mert Yuksekgonul† 1 James Zou† 1", + "bbox": [ + 171, + 176, + 797, + 193 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 241, + 220, + 320, + 236 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The widespread adoption of AI systems in the economy hinges on their ability to generate economic value that outweighs their inference costs. Evaluating this tradeoff requires metrics that account for both performance and costs. We propose a framework grounded in production theory for evaluating language models by combining accuracy and inference cost. We introduce cost-of-pass, the expected monetary cost of generating a correct solution. We then define the frontier cost-of-pass as the minimum cost-of-pass achievable across available models or the human-expert, using the approximate cost of hiring an expert. Our analysis reveals distinct economic insights. First, lightweight models are most cost-effective for basic quantitative tasks, large models for knowledge-intensive ones, and reasoning models for complex quantitative problems, despite higher per-token costs. Second, tracking this frontier cost-of-pass over the past year reveals significant progress, particularly for complex quantitative tasks where the cost has roughly halved every few months. Third, to trace key innovations driving this progress, we examine counterfactual frontiers—estimates of cost-efficiency without specific model classes. We find that innovations in lightweight, large, and reasoning models have been essential for pushing the frontier in basic quantitative, knowledge-intensive, and complex quantitative tasks, respectively. Finally, we assess the cost-reductions afforded by common inference-time techniques like majority voting and self-refinement, finding that their marginal accuracy gains rarely justify their costs. 
Our findings underscore that complementary model-level innovations are the primary drivers of cost-efficiency, and our economic framework provides a principled tool for measuring this progress and guiding deployment.", + "bbox": [ + 117, + 251, + 444, + 828 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 220, + 629, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The recent progress in generative AI, particularly language models (LMs), has sparked significant interest in their potential to transform industries, automate cognitive tasks, and reshape economic productivity (Brynolfsson et al., 2025; Eloundou et al., 2024; Acemoglu, 2024). The widespread adoption of these AI systems in the economy hinges on whether the economic benefits generated by the tasks they can perform outweigh the associated inference costs, and whether those inference costs are lower than the cost of equivalent human labor. Consequently, two priorities have emerged at the forefront of LM research: advancing capabilities and reducing costs. These goals, however, often involve trade-offs with more powerful models or test-time techniques that offer higher accuracy at the expense of greater computational and monetary cost (Chen et al., 2024; Parashar et al., 2025; Madaan et al., 2023; Wang et al., 2023; Kapoor et al., 2024). While standard metrics capture accuracy or other system capabilities, they fail to account for cost, leading to an incomplete picture of progress. Ultimately, what matters to the users is not just raw capability, but the value delivered relative to cost and the standard has been to interpret and report these separately. 
As the ecosystem of models grows, it is essential to assess new models not in isolation, but in the context of a broader ecosystem, where marginal improvements may or may not justify higher costs, and do so in an easy-to-interpret manner.", + "bbox": [ + 496, + 244, + 887, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To systematically investigate the trade-off between cost and performance and analyze the LM ecosystem as a whole, we draw insights from a well-established and foundational framework from economics: production frontiers. Economists have long studied these frontiers, which map a set of inputs to the maximum output attainable under a given technology (Farrell, 1957). In Farrell's original formulation, a producer is technically efficient if no input can be reduced without lowering output, and price efficient if the input mix minimizes cost given input prices. Together, these conditions yield the lowest possible cost per unit of output. Extending this framework, Aigner et al. (1977) introduced stochastic frontier production functions, in which the relationship between inputs and output is modeled as stochastic rather than deterministic, practically accounting for potential defective outputs that do not pass evaluation criteria due to factors beyond the producer's control.", + "bbox": [ + 496, + 645, + 888, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13359v1 [cs.AI] 17 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "\\*Co-first authors.", + "bbox": [ + 109, + 849, + 215, + 862 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Co-senior authors.", + "bbox": [ + 109, + 863, + 227, + 876 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Stanford University. 
$\\boxtimes$ {mhamza, jamesz}@stanford.edu.", + "bbox": [ + 109, + 877, + 468, + 891 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\ddagger$ https://github.com/mhamzaerol/Cost-of-Pass.", + "bbox": [ + 109, + 892, + 431, + 904 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Concepts", + "text_level": 1, + "bbox": [ + 156, + 99, + 269, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Cost-of-Pass: Expected cost of producing a correct output.", + "bbox": [ + 117, + 143, + 307, + 169 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nv (m, p) = \\frac {\\mathbb {E} [ \\operatorname {c o s t} _ {m} (p) ]}{\\mathbb {E} [ \\operatorname {a c c u r a c y} _ {m} (p) ]} = \\frac {C _ {m} (p)}{R _ {m} (p)}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 175, + 308, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Human Expert Baseline Cost: Cost of hiring a human expert to produce a correct output.", + "bbox": [ + 117, + 220, + 307, + 256 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nv (\\mathrm {e x p e r t}, p) \\approx C _ {\\mathrm {e x p e r t}} (p)\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 260, + 282, + 273 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Frontier Cost-of-Pass: Lowest cost-of-pass given available set of LMs & human expert baseline.", + "bbox": [ + 116, + 296, + 308, + 332 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 335, + 281, + 351 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nV _ {p} (\\mathcal {M}) = \\min _ {m \\in \\mathcal {M}} v (m, p) \\longrightarrow \\text {B e s t L M C o s t - o f - P a s s}\n$$\n", + "text_format": "latex", 
+ "bbox": [ + 112, + 356, + 312, + 371 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg", + "image_caption": [ + "(A) Frontier Cost-of-pass & Human Expert Baseline" + ], + "image_footnote": [], + "bbox": [ + 349, + 132, + 594, + 227 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg", + "image_caption": [ + "(B) Progress as Frontier Cost-of-Pass over Time" + ], + "image_footnote": [], + "bbox": [ + 348, + 282, + 598, + 385 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg", + "image_caption": [ + "(C) Essentialness of Model Families to Task Categories" + ], + "image_footnote": [], + "bbox": [ + 619, + 132, + 872, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg", + "image_caption": [ + "(D) Cost Reductions with Inference Time Techniques", + "Figure 1: Highlights of the cost-of-pass framework and empirical analyses. Core concepts (left) set foundations for: (A) Comparing the Human Expert Baseline to the frontier achieved by the single most effective LM per task category. (B) Tracking the reduction in frontier cost-of-pass over time, indicating progress driven by new model releases (color-coded by family). (C) Quantifying the essential contribution of each model family: lightweight (less than $1 per million tokens), large, and reasoning; to the current cost-efficiency frontier, measured by the percentage of each family's contribution. (D) Assessing the economic benefit (relative cost reduction) achieved by applying common inference-time techniques over the baseline model frontier (which rarely results in meaningful gains)." 
+ ], + "image_footnote": [], + "bbox": [ + 619, + 284, + 872, + 386 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These economic concepts are highly relevant to modern LMs, which inherently function as stochastic producers: for a given input, they yield a desired output (e.g., a correct solution) stochastically (Brown et al., 2024). Common practices such as employing scaffolds or more computationally intensive inference techniques (Snell et al., 2024; Madaan et al., 2023; Wang et al., 2023) represent efforts to manipulate this production process. These strategies seek to increase the probability of success but typically do so at the expense of higher computational cost, directly mirroring the economic trade-offs inherent in production efficiency. Motivated by these parallels and the economic goal of minimizing cost per successful output under uncertainty, we develop a quantitative framework tailored to LMs.", + "bbox": [ + 84, + 487, + 475, + 698 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize our contributions as follows.", + "bbox": [ + 84, + 705, + 375, + 720 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Concepts. We introduce cost-of-pass (§2.2), which quantifies the expected monetary cost to achieve a successful output for a given problem. Building on this concept and incorporating a human-expert cost baseline, we define the frontier cost-of-pass as the minimum achievable cost-of-pass across all available options (LMs and human-expert) for that problem. We show these reveal distinct economic niches for model families (e.g., lightweight vs. reasoning models) on different tasks, which accuracy comparisons alone obscure (§3.2).", + "bbox": [ + 83, + 729, + 475, + 878 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tracking progress with frontier cost-of-pass. 
Using the", + "bbox": [ + 84, + 887, + 473, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cost-of-pass and frontier cost-of-pass, we analyze economic improvements across three task categories from May 2024 to February 2025. We observe an exponential decrease in frontier cost-of-pass across all tasks, though the trends vary. Notably, we observe that, over the past year, the expected cost of generating a correct solution to complex quantitative problems has been cut in half every few months. We find that the frontier cost-of-pass is driven primarily by lightweight models and reasoning models (§3.3).", + "bbox": [ + 495, + 487, + 885, + 623 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Counterfactual frontier in the absence of model families. We show that our analysis reveals the complementary roles of different model types in driving recent progress. Innovations in lightweight models have been instrumental in reducing costs on basic quantitative tasks. Large models, by contrast, have been most impactful for knowledge-based benchmarks like GPQA Diamond (Rein et al., 2024). Meanwhile, reasoning models have been central to advances on complex quantitative reasoning challenges such as AIME (MAA, 2024) and MATH (Hendrycks et al., 2021) ( $\\S$ 3.4).", + "bbox": [ + 495, + 630, + 887, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Impact of post-hoc inference time techniques. 
We observe that common test-time techniques such as self-refinement (Madaan et al., 2023) and majority voting (self-consistency; Wang et al., 2022) to improve performance offer either limited or no economic benefits, indicating that the recent reductions in frontier cost-of-pass have been mostly driven by model-level innovations (§ 3.5).", + "bbox": [ + 495, + 789, + 888, + 895 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Setup", + "text_level": 1, + "bbox": [ + 84, + 83, + 160, + 101 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Economic Theory of Production Efficiency", + "text_level": 1, + "bbox": [ + 84, + 109, + 415, + 125 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Classical production theory examines how producers convert inputs into outputs efficiently. Given a set of producers $\\mathcal{F} = \\{f_0, \\dots, f_{n-1}\\}$ , we are often interested in the maximum output attainable for a given combination of inputs. If producing $u \\in \\mathbb{R}_{>0}$ units of output requires an input vector $\\mathbf{x} \\in \\mathbb{R}_{\\geq 0}^k$ (e.g., quantities of different resources), the input requirement set $P_u$ contains all input vectors capable of producing at least $u$ units:", + "bbox": [ + 84, + 133, + 475, + 253 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP _ {u} = \\left\\{\\mathbf {x} \\mid \\max _ {f _ {i} \\in \\mathcal {F}} f _ {i} (\\mathbf {x}) \\geq u \\right\\}. 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 265, + 475, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Based on this input requirement and a vector $\\mathbf{w_i} \\in \\mathbb{R}_{\\geq 0}^k$ being the prices of the inputs (incurred by each producer $i$ ), the frontier cost for producing $u$ units of output is the minimum cost required:", + "bbox": [ + 84, + 300, + 475, + 361 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nV _ {u} = \\min _ {\\mathbf {x} \\in P _ {u}, f _ {i} \\in \\mathcal {F}} \\mathbf {w} _ {\\mathbf {i}} ^ {T} \\mathbf {x}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 371, + 475, + 397 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "subject to $f_{i}(\\mathbf{x}) \\geq u$ implicitly included in $\\mathbf{x} \\in P_u$ . This $V_{u}$ quantifies the lowest possible cost to achieve output $u$ given the available production technologies $(\\mathcal{F})$ and input prices $(\\mathbf{w_i})$ . Farrell (1957) used these core concepts to build definitions for technical and price efficiency in a production ecosystem for producers. Critically, Aigner et al. (1977) extended this framework to handle stochastic production functions, where output is probabilistic for a given input.", + "bbox": [ + 84, + 406, + 473, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Building on this economic foundation, we adapt the core concept of a frontier cost $(V_{u})$ to represent the minimum achievable cost for obtaining a correct solution using LMs. Recognizing that a key aspect of LM behavior is its inherent stochasticity, an issue long addressed in economic production theory (Aigner et al., 1977), we incorporate this variability into our cost-efficiency metric. 
This enables us to align our framework with core production concepts and assess the economic impact of stochastic LM producers.", + "bbox": [ + 84, + 536, + 475, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Cost-of-Pass: An Efficiency Metric for LMs", + "text_level": 1, + "bbox": [ + 84, + 686, + 423, + 704 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here we instantiate the economic framework for language models (LMs). Consider a specific problem $p$ , where the unit of production is a correct solution. We define a model $m$ as an inference pipeline using an LM, acting as a stochastic producer. Two quantities characterize its efficiency on problem $p$ :", + "bbox": [ + 84, + 710, + 475, + 804 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$R_{m}(p) = \\mathrm{Prob.}$ of $m$ producing a correct answer on $p$", + "bbox": [ + 84, + 814, + 447, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$C_m(p) = \\text{Expected cost of one inference attempt by } m \\text{ on } p$ .", + "bbox": [ + 84, + 833, + 486, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the context of LMs, the inputs $\\mathbf{x}$ correspond to resources like prompt and generated tokens, while the input prices $\\mathbf{w}$ represent the costs per token charged by the provider. The", + "bbox": [ + 84, + 859, + 475, + 906 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "total cost of these inputs for a single inference attempt by model $m$ on problem $p$ is captured by $C_m(p)$ , effectively instantiating the term $\\mathbf{w}^T\\mathbf{x}$ from the theory in the previous section.", + "bbox": [ + 496, + 84, + 885, + 143 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since the model output is stochastic, the expected number of attempts to obtain the first correct solution is $1 / R_{m}(p)$ , assuming independent trials. 
This yields the cost-of-pass, defined as the expected monetary cost to obtain one correct solution for problem $p$ :", + "bbox": [ + 496, + 152, + 887, + 229 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nv (m, p) = \\frac {C _ {m} (p)}{R _ {m} (p)}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 620, + 242, + 885, + 276 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The cost-of-pass integrates both performance $(R_{m}(p))$ and cost $(C_m(p))$ into a single economically interpretable metric: it quantifies how efficiently financial resources are converted into correct outputs. This formulation mirrors classical production theory, where the goal is to assess the cost of achieving a specific target output (Farrell, 1957); in our case, the target is a correct solution. When a model cannot produce one $(R_{m}(p) = 0)$ , the cost-of-pass becomes infinite, appropriately signaling infeasibility.", + "bbox": [ + 495, + 289, + 888, + 426 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. The LM Frontier Cost-of-Pass", + "text_level": 1, + "bbox": [ + 496, + 441, + 743, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While cost-of-pass (§ 2.2) evaluates a single model's efficiency, understanding the overall state of LM capabilities for a given problem requires assessing the collective performance of the entire available LM ecosystem. Therefore, analogous to the frontier cost $V_{u}$ (Eq. 2), we define the $LM$ frontier cost-of-pass for problem $p$ as the minimum cost-of-pass achievable using any available LM strategy $m$ from the set $\\mathcal{M}$ :", + "bbox": [ + 495, + 465, + 888, + 585 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nV _ {p} (\\mathcal {M}) = \\min _ {m \\in \\mathcal {M}} v (m, p). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 606, + 601, + 885, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$V_{p}(\\mathcal{M})$ quantifies the minimum expected cost to solve problem $p$ using the most cost-effective model currently available within the set $\\mathcal{M}$ . If no LM in $\\mathcal{M}$ can solve $p$ (i.e., $R_{m}(p) = 0$ for all $m\\in \\mathcal{M}$ ), then $V_{p}(\\mathcal{M}) = \\infty$ .", + "bbox": [ + 496, + 638, + 887, + 702 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.4. Grounding Evaluation: Estimated Human-Expert Baseline", + "text_level": 1, + "bbox": [ + 496, + 715, + 879, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The LM frontier cost-of-pass $V_{p}(\\mathcal{M})$ reveals the best LM performance but lacks context: it does not show if LMs are economically advantageous over human labor. Moreover, the LM frontier cost-of-pass can be infinite if no LM succeeds. To address both, we introduce human-expert baseline as a reference point, by considering a human-expert annotator as a specific strategy: $m_{\\mathrm{expert}}$ . Let $\\mathcal{M}_0 = \\{m_{\\mathrm{expert}}\\}$ represent this baseline set. We assume experts typically achieve near-perfect correctness $(R_{\\mathrm{expert}}(p) \\approx 1)$ for tasks they are qualified for. Thus, the cost-of-pass for a qualified", + "bbox": [ + 495, + 753, + 887, + 906 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 71 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "expert is approximately their labor cost per problem:", + "bbox": [ + 84, + 85, + 436, + 102 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nv (\\text {e x p e r t}, p) \\approx C _ {\\text {e x p e r t}} (p). 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 113, + 475, + 132 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The estimation of $C_{\\mathrm{expert}}(p)$ involves considering required expertise, time per problem, and appropriate compensation rates (detailed in § 2.6.1). By incorporating this baseline, we define the frontier cost-of-pass for problem $p$ , considering both LMs $(\\mathcal{M})$ and the human-expert alternative $(\\mathcal{M}_0)$ :", + "bbox": [ + 84, + 142, + 475, + 219 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nV _ {p} (\\mathcal {M} \\cup \\mathcal {M} _ {0}) = \\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 231, + 475, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This frontier cost-of-pass represents the true minimum expected cost to obtain a correct solution for problem $p$ using the best available option, whether it's an LM or a human. Crucially, $V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0})$ is always finite (assuming finite human-expert cost and capability).", + "bbox": [ + 84, + 261, + 477, + 338 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.5. Measuring Progress and Value Gain", + "text_level": 1, + "bbox": [ + 84, + 353, + 372, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To track improvements against the best available option over time, let $\\mathcal{M}_t$ denote the total set of available strategies at time $t$ , encompassing both the set of LM strategies released up to time $t$ and the human-expert baseline $\\mathcal{M}_0$ , that is, $\\mathcal{M}_t = \\{m_{\\leq t}\\} \\cup \\mathcal{M}_0$ . 
The frontier cost-of-pass achievable at time $t$ can be calculated as:", + "bbox": [ + 84, + 376, + 475, + 467 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nV _ {p} \\left(\\mathcal {M} _ {t}\\right) = \\min _ {m \\in \\mathcal {M} _ {t}} v (m, p). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 479, + 475, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As new LM models $\\{m_t\\}$ are released, the set expands such that $\\mathcal{M}_t = \\mathcal{M}_{t - 1} \\cup \\{m_t\\}$ . Consequently, the frontier cost-of-pass $V_{p}(\\mathcal{M}_{t})$ forms a non-increasing sequence over time $t$ , tracking the reduction in the minimum cost needed to solve a particular problem $p$ .", + "bbox": [ + 84, + 516, + 475, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To quantify the economic impact of new developments, we define the gain. When a new set of models $\\{m_t\\}$ becomes available at time $t$ (often a single model), the gain for problem $p$ is the reduction it causes in the frontier cost-of-pass:", + "bbox": [ + 84, + 599, + 475, + 660 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nG _ {p} \\left(\\left\\{m _ {t} \\right\\}, \\mathcal {M} _ {t - 1}\\right) = V _ {p} \\left(\\mathcal {M} _ {t - 1}\\right) - V _ {p} \\left(\\mathcal {M} _ {t - 1} \\cup \\left\\{m _ {t} \\right\\}\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 672, + 473, + 704 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that $G_{p}$ measures how much cheaper the new model(s), $\\{m_t\\}$ , make solving $p$ compared to prior best options, including humans. Hence, a large $G_{p}$ value indicates a significant economic contribution in solving $p$ . 
This notion underlies our experiments, analyzing the value generated by models relative to the human baseline and tracking the evolution of the overall frontier.", + "bbox": [ + 84, + 715, + 475, + 823 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Extending to a distribution. Although measuring frontier cost-of-pass and value gain for individual problems can be informative, particularly through a fine-grained perspective, we often care about more than a single instance. Let $P \\sim D$ be a set of problems sampled from a problem distribution $D$ .", + "bbox": [ + 84, + 830, + 477, + 906 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We can then extend our definitions for such a distribution through the following:", + "bbox": [ + 496, + 84, + 885, + 114 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nV _ {p \\sim D} (\\mathcal {M} _ {t}) = \\mathbb {E} _ {p \\sim D} [ V _ {p} (\\mathcal {M} _ {t}) ], \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 141, + 885, + 159 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nG _ {p \\sim D} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) = \\mathbb {E} _ {p \\sim D} [ G _ {p} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) ]. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 161, + 885, + 178 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.6. Estimating the Economic Efficiency", + "text_level": 1, + "bbox": [ + 496, + 210, + 779, + 226 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To operationalize our overall framework for any given distribution of problems, we introduce the following recipe:", + "bbox": [ + 496, + 234, + 887, + 266 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Estimate success rates. For each model-problem pair $(m,p)$ , generate a number of independent attempts to approximate $R_{m}(p)$ . 
We use the same prompt and model settings across these attempts, varying only factors necessary to ensure independence (e.g., internal sampling randomness).", + "(2) Estimate per-attempt cost. Track the average number of tokens (prompt + generation) consumed per attempt, multiply by the current token price (which can differ by model provider or usage level), and add any extra charges (e.g., third-party API calls, external reasoning modules, etc.). This sum yields $C_m(p)$ .", + "(3) Compute cost-of-pass. For each model $m$ , calculate $v(m, p) = C_m(p) / R_m(p)$ . ( $R_m(p) = 0$ yields $v(m, p) = \\infty$ .)", + "(4) Determine frontier cost-of-pass. Estimate human-expert cost $v(\\text{expert}, p)$ (see below). Find $V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0})$ for a given set of strategies $\\mathcal{M}$ .", + "(5) Analyze over benchmarks. Aggregate $V_{p}(\\mathcal{M})$ across problems $p \\sim D$ to get $V_{p \\sim D}(\\mathcal{M}_t)$ . Track progress over time (for $\\mathcal{M}_t$ ) and compute gain $G_{p \\sim D}$ for new models." + ], + "bbox": [ + 496, + 272, + 888, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.6.1. Estimating Human-Expert Cost", + "text_level": 1, + "bbox": [ + 496, + 619, + 764, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To estimate $v(\\text{expert}, p)$ , the plausible cost of obtaining a correct human-expert answer, we systematically determine the required qualifications, appropriate hourly compensation, and average time for a typical problem $p$ per dataset. We determine these quantities based on a hierarchy of evidence by prioritizing the dataset's creation process or associated studies (e.g., reported annotation pay/time (Parrish et al., 2022)). When direct data is absent, we leverage findings from closely related work (Zhang et al., 2024) or infer parameters from the dataset's context (e.g., deriving time-per-problem from contest rules (Art of Problem Solving, 2023)). 
Compensation rates are informed by reported study payments (Rein, 2024) or relevant market rates for comparable expertise (e.g., specialized tutoring rates (TutorCruncher, 2025; Wyzant Tutoring, 2025)).1", + "bbox": [ + 495, + 642, + 888, + 869 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 71 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1The full derivation, justification, and sources for our approach are detailed in Appendix A. The resulting estimates are in Table 3.", + "bbox": [ + 496, + 878, + 887, + 906 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/522c2601cc01bbc0a0ebbd3ad816b7d64f965dcbdf9ec52f20e4ef99b0dd25fb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.8e-50.192.7e-218.583.3815.33
GPT-4o mini5.4e-50.221.3e-225.382.0614.67
Llama-3.3-70B1.6e-40.167.4e-318.581.3110.67
Large Models
Llama-3.1-405B6.9e-40.146.7e-310.431.138.67
Claude Sonnet-3.52.1e-30.196.4e-314.062.5414.67
GPT-4o2.3e-30.176.2e-314.070.9614.01
Reasoning Models
OpenAI o1-mini5.4e-30.171.3e-212.270.504.80
OpenAI o11.9e-20.224.3e-28.070.902.85
DeepSeek-R11.8e-30.171.5e-214.570.213.41
OpenAI o3-mini1.1e-30.111.1e-28.180.762.03
", + "bbox": [ + 178, + 80, + 802, + 319 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: Frontier dollar cost-of-pass per model / dataset. Each entry is the expected dollar cost of a problem $p \\sim D$ with the presence of the model $m$ and a human expert: $V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0)$ . Per column, the 3 entries with the lowest value (i.e. best frontier cost-of-pass) have blue highlights. Different model families emerge as cost-effective at different task categories, highlighting the strengths of our evaluation.", + "bbox": [ + 83, + 327, + 888, + 368 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3. Experiments", + "text_level": 1, + "bbox": [ + 84, + 375, + 218, + 391 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1. Models and Datasets", + "text_level": 1, + "bbox": [ + 84, + 400, + 264, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Models. We consider three categories of models:", + "bbox": [ + 84, + 422, + 413, + 439 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Lightweight models: We use the per-token cost as a proxy and select models with a cost less than $1 per million input and output tokens (see Table 4): Llama-3.1-8B (Grattafori et al., 2024), GPT-4o mini (OpenAI, 2024), and Llama-3.3-70B (Meta-AI, 2024).", + "(2) Large models: We select large general-purpose LMs: Llama-3.1-405B (Grattafiori et al., 2024), Claude Sonnet-3.5 (Anthropic, 2024), and GPT-4o (Hurst et al., 2024).", + "(3) Reasoning models: We select models with special reasoning post-training, including OpenAI's o1-mini (OpenAI et al., 2024), o1 (OpenAI et al., 2024), and o3-mini (OpenAI, 2025), as well as DeepSeek R1 (Guo et al., 2025)." + ], + "bbox": [ + 84, + 446, + 475, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Within each category, we select three to four representative models released between the second half of 2024 and early 2025. 
To preserve the integrity of our temporal analysis, we prioritize the earliest stable releases and exclude research previews or experimental versions.", + "bbox": [ + 84, + 650, + 473, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We evaluate models across three sets of tasks:", + "bbox": [ + 84, + 733, + 457, + 748 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Basic quantitative tasks: These involve basic numerical reasoning. We include an arithmetic dataset (Two Digit Addition) to assess basic numerical computation, and GSM8K (Cobbe et al., 2021) to evaluate multi-step grade-school level problem solving.", + "(2) Knowledge-based tasks: These require recalling and reasoning over factual knowledge. We include a scientific knowledge-intensive question answering task (GPQA-Diamond (Rein et al., 2024)) to evaluate models' abl" + ], + "bbox": [ + 84, + 756, + 475, + 898 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "ity to recall and utilize complex scientific facts, and a bias benchmark (BBQ (Parrish et al., 2022)) to evaluate whether models rely on stereotypical knowledge or can disambiguate factual responses from biased defaults.", + "(3) Complex quantitative reasoning tasks: These require complex mathematical reasoning and problem solving. We use MATH-500 (Hendrycks et al., 2021; Lightman et al., 2023) to assess models on competition-level maths problems, and AIME24 (MAA, 2024) to evaluate performance on challenging problems from the 2024 American Invitational Mathematics Examination." + ], + "bbox": [ + 496, + 375, + 887, + 549 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. 
Frontier Cost-of-Pass with a Single Model", + "text_level": 1, + "bbox": [ + 496, + 566, + 823, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this experiment, we aim to quantify the economic value each model $m$ generates on different distributions of problems $p \\sim D$ . For this, we take human-expert as a baseline and quantify the frontier cost-of-pass of a problem in the presence of the model $m$ : $V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0)$ .", + "bbox": [ + 495, + 590, + 887, + 667 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The results in Table 1, highlighting the top three costs, show that our frontier cost-of-pass effectively captures how different model families offer economic advantages across various task categories. We find that lightweight models yield the lowest frontier cost-of-pass on basic quantitative tasks, such as Two Digit Addition. This is expected, as all model families achieve high accuracy on this dataset, making the least expensive models the most cost-effective. In contrast, for knowledge-based tasks, larger models achieve a lower frontier cost-of-pass compared to lightweight ones. 
While the reasoning models, such as o1, are priced significantly more expensively compared to both large and lightweight models, they lead to significant performance improvements, which, overall, result in reductions in the cost-of-pass mainly in complex quantitative tasks.", + "bbox": [ + 495, + 672, + 888, + 900 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg", + "image_caption": [ + "Two Digit Addition" + ], + "image_footnote": [], + "bbox": [ + 96, + 103, + 348, + 244 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg", + "image_caption": [ + "BBQ" + ], + "image_footnote": [], + "bbox": [ + 370, + 103, + 620, + 243 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg", + "image_caption": [ + "MATH500" + ], + "image_footnote": [], + "bbox": [ + 643, + 103, + 874, + 243 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg", + "image_caption": [ + "GSM8K", + "Figure 2: The frontier dollar cost-of-pass (i.e. $V_{p\\sim D}(\\mathcal{M}_t)$ steadily decreases with new model releases, spanning models released between May 2024 and February 2025. Y-axes are normalized (divided by $V_{p\\sim D}(\\mathcal{M}_0)$ , shown in percentage (%))." 
+        ],
+        "image_footnote": [],
+        "bbox": [
+            98,
+            271,
+            330,
+            412
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "image",
+        "img_path": "images/832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg",
+        "image_caption": [
+            "GPQA Diamond"
+        ],
+        "image_footnote": [],
+        "bbox": [
+            367,
+            273,
+            620,
+            412
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "image",
+        "img_path": "images/3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg",
+        "image_caption": [
+            "AIME 2024"
+        ],
+        "image_footnote": [],
+        "bbox": [
+            640,
+            271,
+            877,
+            412
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "In contrast, when either task performance $(R_{m}(p\\sim D))$ or cost $(C_m(p\\sim D))$ is solely taken into account (Tables 5 and 6) such metrics tend to favor either reasoning models or lightweight models respectively due to their significant edge per criteria, without assessing the nuances in the economic impact they induce. This effectively highlights the sophistication of our metric and evaluation framework.",
+        "bbox": [
+            84,
+            462,
+            475,
+            568
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "3.3. Tracking Frontier Cost-of-Pass with New Releases",
+        "text_level": 1,
+        "bbox": [
+            84,
+            584,
+            470,
+            599
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "In this experiment, we track the improvements on the frontier cost-of-pass for a problem. Figure 2 shows the trends of the cumulative gain per dataset $(V_{p\\sim D}(\\mathcal{M}_t))$ , each updated by the corresponding model release $(\\mathcal{M}_{t - 1}\\cup \\{m_t\\})$ . We observe a steady decline in the frontier cost-of-pass for complex quantitative tasks. In contrast, knowledge-based and basic quantitative tasks typically exhibit a sharp initial drop in frontier cost-of-pass with the early releases of models, followed by a plateau. 
To quantify the cost reduction trends, we empirically fit an exponential decay curve of the form:",
+        "bbox": [
+            84,
+            608,
+            475,
+            758
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "equation",
+        "text": "\n$$\nV _ {p} \\left(\\mathcal {M} _ {t}\\right) \\approx a e ^ {- b t} + c, \\tag {11}\n$$\n",
+        "text_format": "latex",
+        "bbox": [
+            202,
+            770,
+            473,
+            789
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "where $t$ denotes time in months since the first model release, and $a$ , $b$ , and $c$ are fit parameters. From this, we compute the time for the exponential component of the cost to drop by $50\\%$ : $T_{1/2} = \\ln(2)/b$ . Using this formulation, we find that for complex quantitative tasks, between May 2024 and February 2025, the frontier cost-of-pass for MATH500 halved approximately every 2.6 months, whereas for AIME",
+        "bbox": [
+            84,
+            799,
+            475,
+            905
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "2024, the halving time was 7.1 months—indicating consistent cost reductions over the past year.",
+        "bbox": [
+            496,
+            462,
+            888,
+            493
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "3.4. Essentialness of Model Families: Counterfactual Frontier Cost-of-Pass",
+        "text_level": 1,
+        "bbox": [
+            496,
+            508,
+            867,
+            539
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Section 3.3 showed the frontier cost-of-pass decreasing over time with new model releases. To understand which model families were most critical to this progress, we conduct a counterfactual analysis that quantifies the impact of removing each family. 
Defining $\\mathcal{M}_g$ as a family of models (lightweight, large, or reasoning), we measure the counterfactual contribution of family $g$ on dataset $D$ by calculating the relative improvement in frontier cost-of-pass attributable to its inclusion:", + "bbox": [ + 495, + 547, + 887, + 681 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {g} , \\mathcal {M} _ {T} \\backslash \\mathcal {M} _ {g}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T}\\right)}. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 690, + 885, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, $\\mathcal{M}_T$ includes all models used in our experiments. This metric represents the relative improvement in the final frontier cost-of-pass $V_{p\\sim D}(\\mathcal{M}_T)$ attributable to the model family $\\mathcal{M}_g$ , with higher values indicating greater essentialness of that family for achieving the current frontier.", + "bbox": [ + 495, + 732, + 887, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 3 illustrates our main findings, revealing distinct roles across model families. Lightweight models help reduce the frontier cost-of-pass on basic quantitative tasks, while large models drive performance on knowledge-intensive tasks. 
Reasoning models play a key role in advancing the frontier for complex quantitative reasoning and also improve", + "bbox": [ + 495, + 814, + 888, + 906 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg", + "image_caption": [ + "Figure 3: The relative improvement $(\\%)$ in frontier cost-of-pass attributable to each model family $g$ , calculated under a counterfactual setting where $\\mathcal{M}_g$ is removed. Higher values signify greater essentialness for maintaining the current frontier." + ], + "image_footnote": [], + "bbox": [ + 181, + 84, + 794, + 244 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d5d1dd0f4fce7f531bc4229a090b9b204042e706c64543dd30f1b2553c279874.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Inference Time TechniqueBasic QuantitativeKnowledge BasedComplex Quantitative
Two Digit AdditionGSM8KBBQGPQA DiamondMATH500AIME24
Self-Refine006.724.900
Maj. Vote (k=3)000000
Maj. Vote (k=4)000000
", + "bbox": [ + 119, + 303, + 854, + 390 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Relative performance gains (%) from different inference time techniques across datasets.", + "bbox": [ + 194, + 397, + 774, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "performance on GPQA-Diamond, as well as GSM8K, which benefits from small reasoning models like o3-mini.", + "bbox": [ + 84, + 420, + 473, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "These findings highlight that progress on different task types is driven by different model paradigms. While large models have brought clear gains on knowledge-intensive tasks (e.g., GPQA), recent improvements in cost-efficiency—especially in more quantitative domains—appear largely driven by advances in lightweight and reasoning models. Together, these results suggest that the current cost-efficiency frontier, as reflected in our framework, is shaped mainly by (i) lightweight models and (ii) reasoning models.", + "bbox": [ + 84, + 458, + 475, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.5. Impact of Inference Time Techniques on Frontier Cost-of-Pass", + "text_level": 1, + "bbox": [ + 84, + 609, + 465, + 640 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We now assess whether common inference-time techniques provide meaningful economic benefits. Specifically, we ask: is it cost-effective to improve model performance through these techniques, compared to relying on the models' baseline performance? To explore this, we focus on the set of lightweight and large models, denoted by $\\mathcal{M}_L$ .", + "bbox": [ + 84, + 648, + 475, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, we determine the frontier cost-of-pass achieved by $\\mathcal{M}_L$ without any modifications. We then apply a given inference-time technique uniformly across all models in $\\mathcal{M}_L$ , yielding a modified set $\\mathcal{M}_L^*$ . 
The gain from this technique, measured relative to the original frontier cost-of-pass, can be computed as follows:", + "bbox": [ + 84, + 747, + 475, + 838 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {L} ^ {*} , \\mathcal {M} _ {L}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {L}\\right)}. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 848, + 475, + 882 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this study, we consider two popular techniques: self-", + "bbox": [ + 84, + 890, + 477, + 906 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "refinement (Madaan et al., 2023) and majority voting (a.k.a. self-consistency; Wang et al., 2023), with 3 and 4 votes.", + "bbox": [ + 496, + 420, + 887, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Table 2, self-refinement shows moderate economic benefit on knowledge-intensive tasks, with a notable $24.9\\%$ improvement on GPQA Diamond. In contrast, majority voting—despite potentially enhancing raw accuracy—does not offer relative economic improvement across the tested models and datasets.", + "bbox": [ + 496, + 458, + 888, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Collectively, these findings suggest, at least for the evaluated techniques, that the increased computational costs generally outweigh the performance benefits relative to the frontier cost-of-pass established by the baseline models. This implies that these common inference-time approaches may not be sufficient on their own to yield significant economic benefits within our evaluation framework for now.", + "bbox": [ + 495, + 556, + 887, + 661 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4. Related Works", + "text_level": 1, + "bbox": [ + 496, + 681, + 648, + 698 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Economic perspectives and broader impacts. 
The efficiency of LMs carries significant economic implications, as they are viewed as general-purpose technologies impacting productivity and labor (Eloundou et al., 2024; Brynjolfsson et al., 2025). Complementary economic analyses explore provider strategies regarding pricing and product design (Bergemann et al., 2025), and user-side decision-making involving ROI, token costs, and success probabilities.",
+        "bbox": [
+            495,
+            707,
+            885,
+            829
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "Our cost-of-pass metric serves as a crucial bridge between these technical realities of model performance and their economic consequences. By providing a fundamental measure, the expected monetary cost to successfully complete",
+        "bbox": [
+            495,
+            835,
+            887,
+            897
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "header",
+        "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models",
+        "bbox": [
+            254,
+            56,
+            718,
+            70
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "page_number",
+        "text": "7",
+        "bbox": [
+            480,
+            922,
+            491,
+            934
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "a task, it allows for quantifying the economic contribution of specific AI systems and informs rational model selection for achieving economic viability, and provides a quantitative perspective on the economic evolution of the LM ecosystem.",
+        "bbox": [
+            84,
+            84,
+            475,
+            147
+        ],
+        "page_idx": 7
+    },
+    {
+        "type": "text",
+        "text": "LM resource consumption, efficiency optimization and benchmarking. Research increasingly recognizes the importance of LM resource consumption and efficiency. Studies have quantified operational costs like tokens (Chen et al., 2023) and energy (Maliakel et al., 2025), revealing task-dependent performance and potential diminishing returns from high expenditure (Miserendino et al., 2025). This focus has intensified with the rise of reasoning methodologies (Sui et al., 2025) and inference-time techniques (e.g., Madaan et al. (2023); Wang et al. 
(2023)), which often trade increased computational cost for potential accuracy gains.", + "bbox": [ + 84, + 152, + 477, + 319 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Concerns like \"overthinking,\" where lengthy processing fails to improve results (Chen et al., 2024; Cuadron et al., 2025), have spurred efforts to optimize resource use through methods like dynamic token budgeting (Han et al., 2025), specialized training (Arora & Zanette, 2025), prompt engineering (Xu et al., 2025; Aytes et al., 2025) or researching optimal reasoning lengths (Wu et al., 2025; Yang et al., 2025). Concurrently, evaluation methodologies have evolved beyond pure accuracy or correctness measures.", + "bbox": [ + 84, + 325, + 475, + 464 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Recognizing its insufficiency, researchers have incorporated cost via fixed budgets (Wang et al., 2024), performance heuristics (McDonald et al., 2024), or non-monetary metrics like conciseness (Nayab et al., 2024). Kapoor et al. (2024) strongly advocated for using real dollar costs and accounting for stochasticity—factors central to our approach. Benchmarking efforts have also highlighted diminishing returns from simply scaling inference computation (Parashar et al., 2025). While these works underscore the need for cost-aware analysis, they often rely on specific constraints (e.g., fixed budgets) or heuristic metrics.", + "bbox": [ + 84, + 469, + 477, + 636 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our cost-of-pass framework seeks to advance this by providing a single, interpretable metric grounded in economic production principles, offering a unified way to assess the economic viability of different models and techniques without predefined budget assumptions or proxy metrics.", + "bbox": [ + 84, + 643, + 477, + 720 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion",
+        "text_level": 1,
+        "bbox": [
+            84,
+            738,
+            205,
+            753
+        ],
+        "page_idx": 7
+    },
+    {
+        "type": "text",
+        "text": "We introduced an economic framework designed to evaluate language models by integrating their performance with inference cost. Drawing from production theory, we conceptualize language models as stochastic producers, and assess their efficiency using our proposed cost-of-pass metric, which measures the expected cost per correct solution. Our analysis utilizes this metric alongside the frontier cost-of-pass, defined as the minimum achievable cost compared to a human expert baseline. This approach reveals distinct",
+        "bbox": [
+            84,
+            763,
+            477,
+            902
+        ],
+        "page_idx": 7
+    },
+    {
+        "type": "text",
+        "text": "economic roles played by different model classes. For instance, retrospective and counterfactual evaluations demonstrate that lightweight models primarily drive efficiency on basic tasks, whereas reasoning models are essential for complex problem-solving. Critically, our findings show that common inference-time techniques typically increase the cost-of-pass, thus failing to provide net economic benefits when compared to the progress made by improving the underlying models themselves. In conclusion, our framework offers a principled foundation for measuring language model innovation in economic terms. It serves as a valuable tool for guiding model selection and aligning AI development with real-world value.",
+        "bbox": [
+            496,
+            84,
+            887,
+            282
+        ],
+        "page_idx": 7
+    },
+    {
+        "type": "text",
+        "text": "Acknowledgments",
+        "text_level": 1,
+        "bbox": [
+            498,
+            300,
+            656,
+            316
+        ],
+        "page_idx": 7
+    },
+    {
+        "type": "text",
+        "text": "We thank Federico Bianchi, Dan Jurafsky, Daniel E. Ho, Can Yesildere, and Semyon Lomasov for valuable comments and discussions in the early stages of this project. MHE gratefully acknowledges support from the Fulbright Foreign Student Program. 
BE gratefully acknowledges the support of the Stanford Knight-Hennessy Scholarship. MS gratefully acknowledges the support of an HAI-SAP Fellowship.", + "bbox": [ + 496, + 325, + 888, + 448 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 498, + 465, + 594, + 482 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1st grade 4th quarter expectations – fast facts timed tests. Elementary School Curriculum Note (online PDF), 2021. States 20–25 addition problems should be solved in 1 minute (2–3 sec each) (Fas, 2021).", + "Daron Acemoglu. The Simple Macroeconomics of AI. NBER Working Papers 32487, National Bureau of Economic Research, Inc, May 2024. URL https://ideas.repec.org/p/nbr/nberwo/32487.html.", + "Dennis Aigner, C.A.Knox Lovell, and Peter Schmidt. Formulation and estimation of stochastic frontier production function models. Journal of Econometrics, 6(1):21-37, 1977. ISSN 0304-4076. doi: https://doi.org/10.1016/0304-4076(77)90052-5. URL https://www.sciencedirect.com/science/article/pii/0304407677900525.", + "Anthropic. Claude 3.5 sonnet announcement, 2024. URL https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 13 Feb. 2025.", + "Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025.", + "Art of Problem Solving. American Invitational Mathematics Examination (AIME) Format. AoPS Wiki (aops.com), 2023. 
States AIME is 15 questions in 3 hours (12 min" + ], + "bbox": [ + 500, + 489, + 888, + 906 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "per problem) (Art of Problem Solving, 2023). Accessed Mar 25, 2025.", + "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179, 2025.", + "Dirk Bergemann, Alessandro Bonatti, and Alex Smolin. The economics of large language models: Token allocation, fine-tuning, and optimal pricing. arXiv preprint arXiv:2502.07736, 2025.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787.", + "Erik Brynjolfsson, Danielle Li, and Lindsey Raymond. *Generative ai at work.* The *Quarterly Journal of Economics*, pp. qjae044, 2025.", + "Lingjiao Chen, Matei Zaharia, and James Zou. Frugalgpt: How to use large language models while reducing cost and improving performance. arXiv preprint arXiv:2305.05176, 2023.", + "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2+$ $3=$ ? on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. 
arXiv preprint arXiv:2110.14168, 2021.", + "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025.", + "Tyna Eloundou, Sam Manning, Pamela Mishkin, and Daniel Rock. Gpts are gpts: Labor market impact potential of llms. Science, 384(6702):1306-1308, 2024.", + "Michael James Farrell. The measurement of productive efficiency. Journal of the royal statistical society: series A (General), 120(3):253-281, 1957.", + "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan," + ], + "bbox": [ + 86, + 85, + 475, + 905 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. Token-budget-aware llm reasoning, 2025. URL https://arxiv.org/abs/2412.18547.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe.", + "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihindra, Alan Hayes, Alec Radford, et al. Gpt-4o system card. 
arXiv preprint arXiv:2410.21276, 2024.", + "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024.", + "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023.", + "MAA. American Invitational Mathematics Examination (AIME). https://maa.org/maa-invitational-competitions/, 2024. Accessed: 2025-03-25.", + "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023.", + "Paul Joe Maliakel, Shashikant Ilager, and Ivona Brandic. Investigating energy efficiency and performance trade-offs in llm inference across tasks and dvfs settings. arXiv preprint arXiv:2501.08219, 2025.", + "Tyler McDonald, Anthony Colosimo, Yifeng Li, and Ali Emami. Can we afford the perfect prompt? balancing cost and accuracy with the economical prompting index. arXiv preprint arXiv:2412.01690, 2024." + ], + "bbox": [ + 500, + 85, + 885, + 905 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 71 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Meta-AI. Llama 3.3 70b instruct model, 2024. URL https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct.", + "Samuel Miserendino, Michele Wang, Tejal Patwardhan, and Johannes Heidecke. Swe-lancer: Can frontier llms earn $1 million from real-world freelance software engineering? 
arXiv preprint arXiv:2502.12115, 2025.", + "Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicolamaria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024.", + "OpenAI. Gpt-4o mini: Advancing cost-efficient intelligence, 2024. URL https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/.", + "OpenAI. Openai o3-mini system card, 2025. URL https://openai.com/index/o3-mini-system-card/.", + "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, 
Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quinonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan" + ], + "bbox": [ + 86, + 84, + 480, + 905 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. 
Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan Thibault Sottiaux Thomas Degry Thomas Dimson Tianhao Zheng Timur Garipov Tom Stasi Trapit Bansal. Trevor Creech Troy Peterson Tyna Eloundou Valerie Qi,Vineet Kosaraju,Vinnie Monaco,Vitchyr Pong,Vlad Fomenko Weiyi ZhengWenda ZhouWes McCabe Wojciech ZarembaYann Dubois Yinghai LuYining Chen Young ChaYu BaiYuchen He,Yuchen Zhang,Yunyun Wang,Zheng Shao,and Zhuohan Li. Openai o1 system card2024. URL https://arxiv.org/abs/2412.16720.", + "Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025.", + "Alicia Parrish, Angelica Chen, Nikita Nangia, Vishakh Padmakumar, Jason Phang, Jana Thompson, Phu Mon Htut, and Samuel Bowman. BBQ: A hand-built bias benchmark for question answering. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 
2086-2105, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022." + ], + "bbox": [ + 500, + 84, + 887, + 905 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "findings-acl.165. URL https://aclanthology.org/2022.findings-acl.165/.", + "David Rein. Can good benchmarks contain mistakes? NYU Alignment Research Group Blog, May 2024. Reveals GPQA expert pay (\\(100/hr) and non-expert solve times (Rein, 2024). Online: wp.nyu.edu/...mistakes.", + "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof qa benchmark. In First Conference on Language Modeling, 2024.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling lIm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024.", + "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419, 2025.", + "TutorCruncher. Average tutoring rates use: How much do tutors charge per hour? TutorCruncher Blog, Feb 2025. Reports $45-$ 100/hr as typical range for test-prep tutoring (TutorCruncher, 2025).", + "Upwork. Data entry specialist hourly rates (cost to hire data entry specialist). Upwork Hiring Guide, 2025. Median $13/hr for data entry freelancers;$ 10–$20/hr typical range (Upwork, 2025). 
Accessed Mar 25, 2025.", + "Junlin Wang, Siddhartha Jain, Dejiao Zhang, Baishakhi Ray, Varun Kumar, and Ben Athiwaratkun. Reasoning in token economies: Budget-aware evaluation of llm reasoning strategies. arXiv preprint arXiv:2406.06461, 2024.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw.", + "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms. arXiv preprint arXiv:2502.07266, 2025." + ], + "bbox": [ + 86, + 85, + 475, + 904 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wyzant Tutoring. New jersey math tutors cost $33 -$ 55 per hour on average. Wyzant.com (tutoring rate listing), 2025. Average private tutoring rates for math (K-12 and competition) (Wyzant Tutoring, 2025). Accessed Mar 25, 2025.", + "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025.", + "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for ltm reasoning. arXiv preprint arXiv:2502.18080, 2025.", + "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. In NeurIPS 2024 Datasets and Benchmarks Track, 2024. 
Reports human solve rate on GSM8K: 4 problems/15 min (3.7 min each) (Zhang et al., 2024)." + ], + "bbox": [ + 500, + 84, + 885, + 416 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A. Details of Human Expert Cost Estimation", + "text_level": 1, + "bbox": [ + 84, + 83, + 464, + 101 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we introduce the detailed analysis of how the human expert costs in Table 3 are calculated per dataset. AIME (American Invitational Mathematics Examination) consists of 15 challenging math problems in a 3-hour contest (administered in two separate sections: AIME I & II), giving an average of about 12 minutes per problem (Art of Problem Solving, 2023). In practice, expert math tutors for competitions like AIME command high hourly fees in the range of $45 -$ 100, reflecting intensive test-preparation rates (TutorCruncher, 2025). This rate range aligns with specialized test prep tutoring in the US, which is higher than regular tutoring due to the advanced problem-solving skills required (TutorCruncher, 2025). At roughly 12 minutes per AIME question on average, a solver could handle about five such problems per hour under exam conditions (Art of Problem Solving, 2023).", + "bbox": [ + 84, + 109, + 475, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "BBQ (Bias Benchmark for QA) contains short question-answer scenarios targeting social bias. Crowdworkers annotating BBQ have been paid around $15 per hour, a rate chosen to exceed U.S. minimum wage (Parrish et al., 2022). Because each task includes multiple BBQ questions, workers were able to answer roughly 5 questions in 2 minutes (Parrish et al., 2022) - i.e. 
~24 seconds per question, or about 0.4 minutes per question. This fast per-question time reflects the fact that BBQ items are short multiple-choice queries, allowing a human annotator to complete approximately 150 BBQ questions in an hour at that pay rate (Parrish et al., 2022).", + "bbox": [ + 84, + 358, + 477, + 540 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "GPQA Diamond consists of extremely difficult graduate-level science questions, so human experts demand high compensation. In one case, domain experts were paid about \\(100 per hour to contribute and validate GPQA questions (Rein et al., 2024). These questions are \"Google-proof\" and time-consuming: skilled non-expert participants spent over 30-35 minutes on average per question when attempting to solve GPQA problems with unrestricted web access (Rein et al., 2024). This long duration per question underscores GPQA's complexity – at most 2 questions could be solved in an hour even by motivated annotators, which justifies the premium expert hourly rate (Rein, 2024).", + "bbox": [ + 84, + 547, + 475, + 728 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "GSM8K contains grade-school level math word problems. Solving these is relatively time-efficient for adults: in one study, crowdworkers under time pressure managed to solve about 4.07 GSM8K problems in 15 minutes on average (Zhang et al., 2024). That corresponds to roughly 3.7 minutes per question for a human solver. The required skill is comparable to general math tutoring at the K-8 level, for which typical U.S. tutor rates are about $33 -$ 55 per hour on platforms like Wyzant (Wyzant Tutoring, 2025). 
At such a rate, paying a person to solve GSM8K problems would be economical, given that a proficient solver can complete", + "bbox": [ + 84, + 734, + 475, + 902 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "approximately 16 questions in one hour (Zhang et al., 2024).", + "bbox": [ + 496, + 84, + 887, + 101 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "MATH500 is a set of 500 advanced competition math problems (drawn from the harder tier of a larger MATH dataset). These problems are similar in difficulty to top-level contest questions such as late AIME or Olympiad qualifying problems. As with AIME, a well-prepared human might spend on the order of 10-15 minutes per problem, roughly $\\sim$ 12 minutes on average for a hard competition question (Art of Problem Solving, 2023). Tutors capable of solving and teaching such Olympiad-level problems often charge rates on the order of $50 per hour (with a typical range of$ 35- $60 for competition math tutoring) (Wyzant Tutoring, 2025). This implies that solving roughly five MATH500 problems could cost about $50 and take around an hour, consistent with the per-question time and high skill required.", + "bbox": [ + 495, + 107, + 888, + 319 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Two-Digit Addition consists of simple two-digit addition problems, which are very quick for humans to solve. Early elementary students are often expected to complete about 20-25 basic addition problems in one minute in \"mad minute\" drills (Fas, 2021). This corresponds to roughly 2-3 seconds per addition (0.04 minutes per question). Because the task is so elementary, the labor to solve large numbers of such problems can be valued at a lower hourly rate. Simple data-entry style work or basic math tasks on freelance platforms pay on the order of $10 -$ 20 per hour (Upwork, 2025). 
At $15/hour, for example, a worker could theoretically solve several hundred 2-digit additions within the hour, given the ~3-second average solution time (Fas, 2021).", + "bbox": [ + 495, + 325, + 888, + 522 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B. Details of Evaluation", + "text_level": 1, + "bbox": [ + 496, + 542, + 700, + 556 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For each dataset in our evaluation, we sample up to 128 instances and run each model $n = 8$ times to estimate the expected runtime cost and accuracy per sample. For all models except OpenAI's reasoning models, we set the temperature to 0.7 and top_p to 1.0. In the case of OpenAI's reasoning models, we use a temperature of 1.0 and do not apply top_p. Additionally, we use the default maximum token generation limits provided by each model. Per sample, we employ a concise but descriptive instruction prompt for the models to follow.", + "bbox": [ + 495, + 568, + 887, + 718 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In our experiments, we define the pass $r_m(p)$ as whether the model obtains a correct answer after a single run or not (0 or 1), and the cost $c_m(p)$ as:", + "bbox": [ + 495, + 727, + 885, + 772 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nc _ {m} (p) = n _ {\\text {i n}} (m, p) \\cdot c _ {\\text {i n}} (m) + n _ {\\text {o u t}} (m, p) \\cdot c _ {\\text {o u t}} (m) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 777, + 885, + 795 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $n_{*}(m,p)$ denotes the number of input / output tokens consumed / generated by the model $m$ on problem $p$ , and $c_{*}(m)$ denotes the dollar costs per input / output tokens consumed / generated by the model $m$ (see Table 4 for the pricing). 
For the expert costs, we utilize the estimations from Table 3, and set the rates to the upper-bound value to ensure the approximation of the expert accuracy being 1.", + "bbox": [ + 495, + 800, + 885, + 905 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/3a562bbe1004e7cf970e8b8277eea3ed839c8f64724ec353dd49ba5d688790e0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetQualification RequirementsHourly RateTime per QuestionEst. Cost
AIMEAdvanced high-school contest math skills$45–$100~12 minutes$9–$20
BBQGeneral familiarity with social biases$15~0.4 minutes (24 sec)$0.10
GPQA Dia.Graduate-level domain expertise$100~35 minutes$58
GSM8KBasic arithmetic reasoning$33–$55~3.7 minutes$2–$3.50
MATH500Strong competition-level problem-solving$35–$60~12 minutes$7–$12
Two-Digit Add.Basic numeracy$10–$20~0.04 minutes (3 sec)$0.01–$0.02
", + "bbox": [ + 99, + 80, + 874, + 234 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 3: Estimated costs of hiring a human expert to solve one question from each dataset, based on typical qualifications, hourly rates, and time per question.", + "bbox": [ + 84, + 244, + 887, + 272 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Experiment Prompt", + "text_level": 1, + "bbox": [ + 93, + 280, + 241, + 295 + ], + "page_idx": 12 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Please solve the following question. You can explain your solution before presenting the final answer. Format your final answer as: ... Instructions: - For multiple-choice: Give only the letter (e.g., (A)). - For numeric: Give only the number (e.g., 42). - For free-response: Provide the full final answer text. INPUT: , , {input} ,", + "guess_lang": "txt", + "bbox": [ + 93, + 299, + 467, + 612 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C. Additional Results", + "text_level": 1, + "bbox": [ + 86, + 638, + 269, + 655 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.1. Expected Accuracy and Inference Costs", + "text_level": 1, + "bbox": [ + 84, + 665, + 401, + 681 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As discussed in the Section 3.2, we share the results of expected cost and accuracy per model per dataset. We can observe the skewed preference of a particular model family under each metric, implying the inability of expressing economic impact of models through these metrics solely.", + "bbox": [ + 84, + 689, + 475, + 765 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.2. Relative Gain per Model Release", + "text_level": 1, + "bbox": [ + 84, + 781, + 352, + 796 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Figure 4 presents the relative improvement in temporal frontier cost-of-pass for each model release, illustrated using bar plots. 
Namely, we calculate:", + "bbox": [ + 84, + 804, + 475, + 851 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {t} \\right\\} , \\mathcal {M} _ {t - 1}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {t - 1}\\right)} \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 869, + 475, + 904 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The results indicate that the reasoning models demonstrate notable advancements, particularly on complex quantitative tasks. In contrast, lightweight models exhibit marked gains on basic tasks. These findings support the observations from our experiments (Sections 3.2, 3.4). Notably, The substantial improvement observed for GPT-4o is likely due to it being the first model included in our analysis, resulting in a pronounced leap relative to the baseline cost associated with human expert annotation.", + "bbox": [ + 496, + 280, + 885, + 416 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.3. Counterfactual Frontier Cost-of-Pass in the Absence of a Single Model", + "text_level": 1, + "bbox": [ + 496, + 431, + 838, + 463 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, following the methodology outlined in Section 3.4, we quantify the relative improvement in frontier cost-of-pass using a counterfactual approach. Specifically, for each model $m_{*}$ , we calculate the following:", + "bbox": [ + 496, + 470, + 888, + 531 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {*} \\right\\} , \\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}, \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 537, + 885, + 573 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "quantifying the essentialness of the model $m_*$ . 
The results presented in Figure 5 demonstrate that the contributions of most individual models are largely compensable by the remaining models. Furthermore, we observe a similar coarse-level trend, as noted in Section 3.4, indicating that different model families provide greater benefits in specific task categories.", + "bbox": [ + 496, + 578, + 887, + 684 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D. Limitations of Our Framework and Future Work Directions", + "text_level": 1, + "bbox": [ + 496, + 703, + 885, + 737 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we acknowledge the limitations of the presented framework and propose directions for future improvements and extensions.", + "bbox": [ + 496, + 747, + 887, + 791 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A primary limitation pertains to our definitions and computations of cost $(C_p(m))$ and performance $(R_{p}(m))$ . Specifically, our current cost computation considers only input and output token costs as proxies for the total expense incurred in obtaining correct outputs. This approach neglects indirect or overhead costs associated with generating incorrect outputs, such as subsequent verification costs. Regarding per", + "bbox": [ + 495, + 799, + 888, + 906 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/af65fcac32682b6e5ca9b5ec10bb48eec299698dbff86670f68f02431e483ce3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryModelRelease DateCost (per million tokens)
Input TokensOutput Tokens
Lightweight ModelsLlama-3.1-8B7/23/2024$0.18$0.18
GPT-4o Mini7/18/2024$0.15$0.60
Llama-3.3-70B12/6/2024$0.88$0.88
Large ModelsLlama-3.1-405B7/23/2024$3.50$3.50
GPT-4o5/13/2024$2.50$10.00
Claude Sonnet-3.56/20/2024$3.00$15.00
Reasoning ModelsOpenAI o1-mini9/12/2024$1.10$4.40
OpenAI o3-mini1/31/2025$1.10$4.40
DeepSeek-R11/20/2025$7.00$7.00
OpenAI o112/5/2024$15.00$60.00
", + "bbox": [ + 192, + 80, + 781, + 268 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/630111621fe5a87da45a6dc5cf0ef2e8173bf3a731e83a851a6919989be45eec.jpg", + "table_caption": [ + "Table 4: Per-token inference costs with release dates." + ], + "table_footnote": [], + "table_body": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B89.4575.7821.4817.8737.3012.50
GPT-4o mini99.9088.5753.3218.0770.0214.58
Llama-3.3-70B99.9092.0985.0646.4872.7533.33
Large Models
Llama-3.1-405B99.7193.9585.7444.1467.8731.67
Claude Sonnet-3.5100.0094.4392.5855.3764.7515.83
GPT-4o99.7191.9990.0447.0773.1414.58
Reasoning Models
OpenAI o1-mini99.5192.5885.7449.1285.9453.33
OpenAI o1100.0094.0495.0273.8389.4572.50
DeepSeek-R1100.0093.3683.6954.8893.8560.83
OpenAI o3-mini100.0092.7783.7971.6888.5777.08
", + "bbox": [ + 148, + 305, + 823, + 568 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/16bb05ff693bf5ab03e08b00cbae1fffb84268ea796fdc389e85b0fd26e7e712.jpg", + "table_caption": [ + "Table 5: Accuracy (%) per model per dataset: ${R}_{m}\\left( {p \\sim D}\\right)$ . In each column,the 3 entries with the highest accuracy have blue highlights." + ], + "table_footnote": [], + "table_body": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.2e-57.4e-55.2e-51.8e-41.5e-42.2e-4
GPT-4o mini5.4e-51.9e-41.0e-43.9e-43.7e-45.6e-4
Llama-3.3-70B1.6e-43.3e-43.1e-49.6e-46.7e-41.1e-3
Large Models
Llama-3.1-405B6.9e-41.4e-31.0e-33.0e-32.4e-33.7e-3
Claude Sonnet-3.52.1e-33.7e-33.0e-36.9e-35.9e-37.5e-3
GPT-4o2.3e-34.5e-32.7e-30.018.7e-30.01
Reasoning Models
OpenAI o1-mini5.4e-38.4e-37.6e-30.020.020.07
OpenAI o10.020.030.040.250.130.52
DeepSeek-R11.8e-35.1e-34.6e-30.040.010.04
OpenAI o3-mini1.1e-32.1e-32.6e-30.015.4e-30.02
", + "bbox": [ + 142, + 606, + 828, + 867 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 6: Dollar cost incurred per model per dataset: ${C}_{m}\\left( {p \\sim D}\\right)$ . In each column,the 3 entries with the lowest cost have blue highlights.", + "bbox": [ + 84, + 877, + 883, + 893 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 717, + 70 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 88, + 339, + 276 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 88, + 609, + 277 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 88, + 877, + 277 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg", + "image_caption": [ + "Figure 4: Bar plot showing the percentage of change in frontier cost-of-pass per model release (i.e. 
$\\frac{G_{p\\sim D}(\\{m_t\\},\\mathcal{M}_{t-1})}{V_{p\\sim D}(\\mathcal{M}_{t-1})}$ )" + ], + "image_footnote": [], + "bbox": [ + 96, + 286, + 341, + 477 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 286, + 609, + 477 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 286, + 875, + 477 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "formance, the use of accuracy as a binary success-or-failure metric presupposes the existence of a reliable verification pipeline and a practical decision mechanism, potentially oversimplifying scenarios where these assumptions do not hold. Additionally, our cost-of-pass metric, which combines cost and performance, currently does not account for variance information, limiting its practical interpretability in situations where two scenarios with similar cost-of-pass values exhibit substantially different variances. Furthermore, from a practical standpoint, cost modeling could consider alternative units (e.g., latency, inference time, FLOPs), which are currently not analyzed.", + "bbox": [ + 84, + 518, + 475, + 699 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Nevertheless, a significant strength of our framework is its abstract and modular design, facilitating extensions to address these limitations. Future work can enhance the precision of cost computations by integrating additional cost factors, such as verification overheads or indirect costs. Moreover, the framework could be adapted to alternative resource-consumption metrics like latency, inference time, or FLOPs. 
Regarding performance evaluation, the binary accuracy metric could be replaced or supplemented with alternative success measures tailored to specific scenarios, especially those emphasizing a particular balance between performance and cost. Incorporating variance and other statistical information into cost and performance calculations", + "bbox": [ + 84, + 705, + 477, + 902 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "could also enhance practical usability and interpretability.", + "bbox": [ + 496, + 518, + 879, + 534 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "An additional limitation lies in the evaluation methodology, particularly regarding human expert cost estimation. Our framework assumes that experts can reliably solve tasks given sufficient conditions (e.g., adequate qualifications, time, compensation). However, this assumption may not hold for particularly challenging problems or datasets with inherently high uncertainty in achieving correct solutions. Future research could address this limitation by conducting rigorous human subject studies to empirically evaluate and incorporate expert performance variability into the cost estimation process.", + "bbox": [ + 495, + 540, + 888, + 705 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg", + "image_caption": [ + "Figure 5: The relative improvement $(\\%)$ in frontier cost-of-pass under a counterfactual setting, removing a model $m_*$ from the model set $\\mathcal{M}_T$ . High values mean that the model is essential for maintaining the current frontier." 
+ ], + "image_footnote": [], + "bbox": [ + 96, + 295, + 875, + 667 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Cost-of-Pass: An Economic Framework for Evaluating Language Models", + "bbox": [ + 254, + 56, + 718, + 70 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 477, + 922, + 496, + 935 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_model.json b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8b7b9c2ddc51e43106c676e9e642ce2c5d384ea6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_model.json @@ -0,0 +1,3334 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.11, + 0.856, + 0.132 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.177, + 0.798, + 0.194 + ], + "angle": 0, + "content": "Mehmet Hamza Erol* 1 Batu El* 1 Mirac Suzgun* 1 Mert Yuksekgonul† 1 James Zou† 1" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.221, + 0.321, + 0.237 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.252, + 0.445, + 0.829 + ], + "angle": 0, + "content": "The widespread adoption of AI systems in the economy hinges on their ability to generate economic value that outweighs their inference costs. Evaluating this tradeoff requires metrics that account for both performance and costs. We propose a framework grounded in production theory for evaluating language models by combining accuracy and inference cost. We introduce cost-of-pass, the expected monetary cost of generating a correct solution. 
We then define the frontier cost-of-pass as the minimum cost-of-pass achievable across available models or the human-expert, using the approximate cost of hiring an expert. Our analysis reveals distinct economic insights. First, lightweight models are most cost-effective for basic quantitative tasks, large models for knowledge-intensive ones, and reasoning models for complex quantitative problems, despite higher per-token costs. Second, tracking this frontier cost-of-pass over the past year reveals significant progress, particularly for complex quantitative tasks where the cost has roughly halved every few months. Third, to trace key innovations driving this progress, we examine counterfactual frontiers—estimates of cost-efficiency without specific model classes. We find that innovations in lightweight, large, and reasoning models have been essential for pushing the frontier in basic quantitative, knowledge-intensive, and complex quantitative tasks, respectively. Finally, we assess the cost-reductions afforded by common inference-time techniques like majority voting and self-refinement, finding that their marginal accuracy gains rarely justify their costs. Our findings underscore that complementary model-level innovations are the primary drivers of cost-efficiency, and our economic framework provides a principled tool for measuring this progress and guiding deployment." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.221, + 0.63, + 0.236 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.246, + 0.888, + 0.639 + ], + "angle": 0, + "content": "The recent progress in generative AI, particularly language models (LMs), has sparked significant interest in their potential to transform industries, automate cognitive tasks, and reshape economic productivity (Brynolfsson et al., 2025; Eloundou et al., 2024; Acemoglu, 2024). 
The widespread adoption of these AI systems in the economy hinges on whether the economic benefits generated by the tasks they can perform outweigh the associated inference costs, and whether those inference costs are lower than the cost of equivalent human labor. Consequently, two priorities have emerged at the forefront of LM research: advancing capabilities and reducing costs. These goals, however, often involve trade-offs with more powerful models or test-time techniques that offer higher accuracy at the expense of greater computational and monetary cost (Chen et al., 2024; Parashar et al., 2025; Madaan et al., 2023; Wang et al., 2023; Kapoor et al., 2024). While standard metrics capture accuracy or other system capabilities, they fail to account for cost, leading to an incomplete picture of progress. Ultimately, what matters to the users is not just raw capability, but the value delivered relative to cost and the standard has been to interpret and report these separately. As the ecosystem of models grows, it is essential to assess new models not in isolation, but in the context of a broader ecosystem, where marginal improvements may or may not justify higher costs, and do so in an easy-to-interpret manner." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.646, + 0.889, + 0.903 + ], + "angle": 0, + "content": "To systematically investigate the trade-off between cost and performance and analyze the LM ecosystem as a whole, we draw insights from a well-established and foundational framework from economics: production frontiers. Economists have long studied these frontiers, which map a set of inputs to the maximum output attainable under a given technology (Farrell, 1957). In Farrell's original formulation, a producer is technically efficient if no input can be reduced without lowering output, and price efficient if the input mix minimizes cost given input prices. Together, these conditions yield the lowest possible cost per unit of output. 
Extending this framework, Aigner et al. (1977) introduced stochastic frontier production functions, in which the relationship between inputs and output is modeled as stochastic rather than deterministic, practically accounting for potential defective outputs that do not pass evaluation criteria due to factors beyond the producer's control." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.13359v1 [cs.AI] 17 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.11, + 0.851, + 0.216, + 0.863 + ], + "angle": 0, + "content": "\\*Co-first authors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.864, + 0.228, + 0.877 + ], + "angle": 0, + "content": "†Co-senior authors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.878, + 0.47, + 0.892 + ], + "angle": 0, + "content": "\\(^{1}\\)Stanford University. \\(\\boxtimes\\) {mhamza, jamesz}@stanford.edu." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.893, + 0.433, + 0.905 + ], + "angle": 0, + "content": "\\(\\ddagger\\)https://github.com/mhamzaerol/Cost-of-Pass." + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.851, + 0.47, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "title", + "bbox": [ + 0.158, + 0.1, + 0.27, + 0.121 + ], + "angle": 0, + "content": "Concepts" + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.145, + 0.308, + 0.17 + ], + "angle": 0, + "content": "Cost-of-Pass: Expected cost of producing a correct output." 
+ }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.176, + 0.309, + 0.201 + ], + "angle": 0, + "content": "\\[\nv (m, p) = \\frac {\\mathbb {E} [ \\operatorname {c o s t} _ {m} (p) ]}{\\mathbb {E} [ \\operatorname {a c c u r a c y} _ {m} (p) ]} = \\frac {C _ {m} (p)}{R _ {m} (p)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.221, + 0.308, + 0.257 + ], + "angle": 0, + "content": "Human Expert Baseline Cost: Cost of hiring a human expert to produce a correct output." + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.261, + 0.284, + 0.275 + ], + "angle": 0, + "content": "\\[\nv (\\mathrm {e x p e r t}, p) \\approx C _ {\\mathrm {e x p e r t}} (p)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.297, + 0.31, + 0.333 + ], + "angle": 0, + "content": "Frontier Cost-of-Pass: Lowest cost-of-pass given available set of LMs & human expert baseline." + }, + { + "type": "equation", + "bbox": [ + 0.146, + 0.337, + 0.282, + 0.352 + ], + "angle": 0, + "content": "\\[\n\\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.357, + 0.313, + 0.372 + ], + "angle": 0, + "content": "\\[\nV _ {p} (\\mathcal {M}) = \\min _ {m \\in \\mathcal {M}} v (m, p) \\longrightarrow \\text {B e s t L M C o s t - o f - P a s s}\n\\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.093, + 0.593, + 0.127 + ], + "angle": 0, + "content": "(A) Frontier Cost-of-pass & Human Expert Baseline" + }, + { + "type": "image", + "bbox": [ + 0.35, + 0.133, + 0.596, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.244, + 0.58, + 0.276 + ], + "angle": 0, + "content": "(B) Progress as Frontier Cost-of-Pass over Time" + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.283, + 0.599, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.093, + 0.871, + 0.127 + ], + "angle": 0, + "content": 
"(C) Essentialness of Model Families to Task Categories" + }, + { + "type": "image", + "bbox": [ + 0.62, + 0.133, + 0.873, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.634, + 0.244, + 0.865, + 0.278 + ], + "angle": 0, + "content": "(D) Cost Reductions with Inference Time Techniques" + }, + { + "type": "image", + "bbox": [ + 0.62, + 0.285, + 0.873, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.402, + 0.888, + 0.48 + ], + "angle": 0, + "content": "Figure 1: Highlights of the cost-of-pass framework and empirical analyses. Core concepts (left) set foundations for: (A) Comparing the Human Expert Baseline to the frontier achieved by the single most effective LM per task category. (B) Tracking the reduction in frontier cost-of-pass over time, indicating progress driven by new model releases (color-coded by family). (C) Quantifying the essential contribution of each model family: lightweight (less than $1 per million tokens), large, and reasoning; to the current cost-efficiency frontier, measured by the percentage of each family's contribution. (D) Assessing the economic benefit (relative cost reduction) achieved by applying common inference-time techniques over the baseline model frontier (which rarely results in meaningful gains)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.488, + 0.477, + 0.699 + ], + "angle": 0, + "content": "These economic concepts are highly relevant to modern LMs, which inherently function as stochastic producers: for a given input, they yield a desired output (e.g., a correct solution) stochastically (Brown et al., 2024). Common practices such as employing scaffolds or more computationally intensive inference techniques (Snell et al., 2024; Madaan et al., 2023; Wang et al., 2023) represent efforts to manipulate this production process. 
These strategies seek to increase the probability of success but typically do so at the expense of higher computational cost, directly mirroring the economic trade-offs inherent in production efficiency. Motivated by these parallels and the economic goal of minimizing cost per successful output under uncertainty, we develop a quantitative framework tailored to LMs." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.707, + 0.377, + 0.721 + ], + "angle": 0, + "content": "We summarize our contributions as follows." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.73, + 0.476, + 0.88 + ], + "angle": 0, + "content": "Concepts. We introduce cost-of-pass (§2.2), which quantifies the expected monetary cost to achieve a successful output for a given problem. Building on this concept and incorporating a human-expert cost baseline, we define the frontier cost-of-pass as the minimum achievable cost-of-pass across all available options (LMs and human-expert) for that problem. We show these reveal distinct economic niches for model families (e.g., lightweight vs. reasoning models) on different tasks, which accuracy comparisons alone obscure (§3.2)." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.888, + 0.475, + 0.904 + ], + "angle": 0, + "content": "Tracking progress with frontier cost-of-pass. Using the" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.488, + 0.887, + 0.624 + ], + "angle": 0, + "content": "cost-of-pass and frontier cost-of-pass, we analyze economic improvements across three task categories from May 2024 to February 2025. We observe an exponential decrease in frontier cost-of-pass across all tasks, though the trends vary. Notably, we observe that, over the past year, the expected cost of generating a correct solution to complex quantitative problems has been cut in half every few months. We find that the frontier cost-of-pass is driven primarily by lightweight models and reasoning models (§3.3)." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.631, + 0.888, + 0.783 + ], + "angle": 0, + "content": "Counterfactual frontier in the absence of model families. We show that our analysis reveals the complementary roles of different model types in driving recent progress. Innovations in lightweight models have been instrumental in reducing costs on basic quantitative tasks. Large models, by contrast, have been most impactful for knowledge-based benchmarks like GPQA Diamond (Rein et al., 2024). Meanwhile, reasoning models have been central to advances on complex quantitative reasoning challenges such as AIME (MAA, 2024) and MATH (Hendrycks et al., 2021) (\\(\\S\\) 3.4)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.79, + 0.889, + 0.896 + ], + "angle": 0, + "content": "Impact of post-hoc inference time techniques. We observe that common test-time techniques such as self-refinement (Madaan et al., 2023) and majority voting (self-consistency; Wang et al., 2022) to improve performance offer either limited or no economic benefits, indicating that the recent reductions in frontier cost-of-pass have been mostly driven by model-level innovations (§ 3.5)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.493, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.072 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.161, + 0.102 + ], + "angle": 0, + "content": "2. Setup" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.11, + 0.416, + 0.126 + ], + "angle": 0, + "content": "2.1. Economic Theory of Production Efficiency" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.134, + 0.477, + 0.255 + ], + "angle": 0, + "content": "Classical production theory examines how producers convert inputs into outputs efficiently. 
Given a set of producers \\(\\mathcal{F} = \\{f_0, \\dots, f_{n-1}\\}\\), we are often interested in the maximum output attainable for a given combination of inputs. If producing \\(u \\in \\mathbb{R}_{>0}\\) units of output requires an input vector \\(\\mathbf{x} \\in \\mathbb{R}_{\\geq 0}^k\\) (e.g., quantities of different resources), the input requirement set \\(P_u\\) contains all input vectors capable of producing at least \\(u\\) units:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.266, + 0.476, + 0.289 + ], + "angle": 0, + "content": "\\[\nP _ {u} = \\left\\{\\mathbf {x} \\mid \\max _ {f _ {i} \\in \\mathcal {F}} f _ {i} (\\mathbf {x}) \\geq u \\right\\}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.301, + 0.476, + 0.362 + ], + "angle": 0, + "content": "Based on this input requirement and a vector \\(\\mathbf{w_i} \\in \\mathbb{R}_{\\geq 0}^k\\) being the prices of the inputs (incurred by each producer \\(i\\)), the frontier cost for producing \\(u\\) units of output is the minimum cost required:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.372, + 0.476, + 0.398 + ], + "angle": 0, + "content": "\\[\nV _ {u} = \\min _ {\\mathbf {x} \\in P _ {u}, f _ {i} \\in \\mathcal {F}} \\mathbf {w} _ {\\mathbf {i}} ^ {T} \\mathbf {x}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.407, + 0.475, + 0.529 + ], + "angle": 0, + "content": "subject to \\( f_{i}(\\mathbf{x}) \\geq u \\) implicitly included in \\( \\mathbf{x} \\in P_u \\). This \\( V_{u} \\) quantifies the lowest possible cost to achieve output \\( u \\) given the available production technologies \\( (\\mathcal{F}) \\) and input prices \\( (\\mathbf{w_i}) \\). Farrell (1957) used these core concepts to build definitions for technical and price efficiency in a production ecosystem for producers. Critically, Aigner et al. (1977) extended this framework to handle stochastic production functions, where output is probabilistic for a given input." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.537, + 0.476, + 0.673 + ], + "angle": 0, + "content": "Building on this economic foundation, we adapt the core concept of a frontier cost \\((V_{u})\\) to represent the minimum achievable cost for obtaining a correct solution using LMs. Recognizing that a key aspect of LM behavior is its inherent stochasticity, an issue long addressed in economic production theory (Aigner et al., 1977), we incorporate this variability into our cost-efficiency metric. This enables us to align our framework with core production concepts and assess the economic impact of stochastic LM producers." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.688, + 0.424, + 0.705 + ], + "angle": 0, + "content": "2.2. Cost-of-Pass: An Efficiency Metric for LMs" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.712, + 0.476, + 0.805 + ], + "angle": 0, + "content": "Here we instantiate the economic framework for language models (LMs). Consider a specific problem \\( p \\), where the unit of production is a correct solution. We define a model \\( m \\) as an inference pipeline using an LM, acting as a stochastic producer. Two quantities characterize its efficiency on problem \\( p \\):" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.815, + 0.449, + 0.831 + ], + "angle": 0, + "content": "\\(R_{m}(p) = \\mathrm{Prob.}\\) of \\(m\\) producing a correct answer on \\(p\\)" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.834, + 0.487, + 0.851 + ], + "angle": 0, + "content": "\\(C_m(p) = \\text{Expected cost of one inference attempt by } m \\text{ on } p\\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.861, + 0.476, + 0.907 + ], + "angle": 0, + "content": "In the context of LMs, the inputs \\(\\mathbf{x}\\) correspond to resources like prompt and generated tokens, while the input prices \\(\\mathbf{w}\\) represent the costs per token charged by the provider. 
The" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.886, + 0.145 + ], + "angle": 0, + "content": "total cost of these inputs for a single inference attempt by model \\( m \\) on problem \\( p \\) is captured by \\( C_m(p) \\), effectively instantiating the term \\( \\mathbf{w}^T\\mathbf{x} \\) from the theory in the previous section." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.153, + 0.888, + 0.23 + ], + "angle": 0, + "content": "Since the model output is stochastic, the expected number of attempts to obtain the first correct solution is \\( 1 / R_{m}(p) \\), assuming independent trials. This yields the cost-of-pass, defined as the expected monetary cost to obtain one correct solution for problem \\( p \\):" + }, + { + "type": "equation", + "bbox": [ + 0.621, + 0.243, + 0.887, + 0.277 + ], + "angle": 0, + "content": "\\[\nv (m, p) = \\frac {C _ {m} (p)}{R _ {m} (p)}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.29, + 0.889, + 0.427 + ], + "angle": 0, + "content": "The cost-of-pass integrates both performance \\((R_{m}(p))\\) and cost \\((C_m(p))\\) into a single economically interpretable metric: it quantifies how efficiently financial resources are converted into correct outputs. This formulation mirrors classical production theory, where the goal is to assess the cost of achieving a specific target output (Farrell, 1957); in our case, the target is a correct solution. When a model cannot produce one \\((R_{m}(p) = 0)\\), the cost-of-pass becomes infinite, appropriately signaling infeasibility." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.443, + 0.744, + 0.457 + ], + "angle": 0, + "content": "2.3. 
The LM Frontier Cost-of-Pass" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.466, + 0.889, + 0.587 + ], + "angle": 0, + "content": "While cost-of-pass (§ 2.2) evaluates a single model's efficiency, understanding the overall state of LM capabilities for a given problem requires assessing the collective performance of the entire available LM ecosystem. Therefore, analogous to the frontier cost \\( V_{u} \\) (Eq. 2), we define the \\( LM \\) frontier cost-of-pass for problem \\( p \\) as the minimum cost-of-pass achievable using any available LM strategy \\( m \\) from the set \\( \\mathcal{M} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.607, + 0.602, + 0.887, + 0.625 + ], + "angle": 0, + "content": "\\[\nV _ {p} (\\mathcal {M}) = \\min _ {m \\in \\mathcal {M}} v (m, p). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.639, + 0.888, + 0.703 + ], + "angle": 0, + "content": "\\(V_{p}(\\mathcal{M})\\) quantifies the minimum expected cost to solve problem \\(p\\) using the most cost-effective model currently available within the set \\(\\mathcal{M}\\). If no LM in \\(\\mathcal{M}\\) can solve \\(p\\) (i.e., \\(R_{m}(p) = 0\\) for all \\(m\\in \\mathcal{M}\\)), then \\(V_{p}(\\mathcal{M}) = \\infty\\)." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.716, + 0.88, + 0.746 + ], + "angle": 0, + "content": "2.4. Grounding Evaluation: Estimated Human-Expert Baseline" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.755, + 0.888, + 0.907 + ], + "angle": 0, + "content": "The LM frontier cost-of-pass \\( V_{p}(\\mathcal{M}) \\) reveals the best LM performance but lacks context: it does not show if LMs are economically advantageous over human labor. Moreover, the LM frontier cost-of-pass can be infinite if no LM succeeds. To address both, we introduce human-expert baseline as a reference point, by considering a human-expert annotator as a specific strategy: \\( m_{\\mathrm{expert}} \\). 
Let \\( \\mathcal{M}_0 = \\{m_{\\mathrm{expert}}\\} \\) represent this baseline set. We assume experts typically achieve near-perfect correctness \\( (R_{\\mathrm{expert}}(p) \\approx 1) \\) for tasks they are qualified for. Thus, the cost-of-pass for a qualified" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.073 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.086, + 0.437, + 0.103 + ], + "angle": 0, + "content": "expert is approximately their labor cost per problem:" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.114, + 0.477, + 0.133 + ], + "angle": 0, + "content": "\\[\nv (\\text {e x p e r t}, p) \\approx C _ {\\text {e x p e r t}} (p). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.143, + 0.476, + 0.22 + ], + "angle": 0, + "content": "The estimation of \\( C_{\\mathrm{expert}}(p) \\) involves considering required expertise, time per problem, and appropriate compensation rates (detailed in § 2.6.1). By incorporating this baseline, we define the frontier cost-of-pass for problem \\( p \\), considering both LMs \\( (\\mathcal{M}) \\) and the human-expert alternative \\( (\\mathcal{M}_0) \\):" + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.232, + 0.476, + 0.25 + ], + "angle": 0, + "content": "\\[\nV _ {p} (\\mathcal {M} \\cup \\mathcal {M} _ {0}) = \\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.262, + 0.478, + 0.339 + ], + "angle": 0, + "content": "This frontier cost-of-pass represents the true minimum expected cost to obtain a correct solution for problem \\( p \\) using the best available option, whether it's an LM or a human. 
Crucially, \\( V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0}) \\) is always finite (assuming finite human-expert cost and capability)." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.354, + 0.373, + 0.37 + ], + "angle": 0, + "content": "2.5. Measuring Progress and Value Gain" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.377, + 0.476, + 0.468 + ], + "angle": 0, + "content": "To track improvements against the best available option over time, let \\(\\mathcal{M}_t\\) denote the total set of available strategies at time \\(t\\), encompassing both the set of LM strategies released up to time \\(t\\) and the human-expert baseline \\(\\mathcal{M}_0\\), that is, \\(\\mathcal{M}_t = \\{m_{\\leq t}\\} \\cup \\mathcal{M}_0\\). The frontier cost-of-pass achievable at time \\(t\\) can be calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.481, + 0.476, + 0.505 + ], + "angle": 0, + "content": "\\[\nV _ {p} \\left(\\mathcal {M} _ {t}\\right) = \\min _ {m \\in \\mathcal {M} _ {t}} v (m, p). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.517, + 0.476, + 0.595 + ], + "angle": 0, + "content": "As new LM models \\(\\{m_t\\}\\) are released, the set expands such that \\(\\mathcal{M}_t = \\mathcal{M}_{t - 1} \\cup \\{m_t\\}\\). Consequently, the frontier cost-of-pass \\(V_{p}(\\mathcal{M}_{t})\\) forms a non-increasing sequence over time \\(t\\), tracking the reduction in the minimum cost needed to solve a particular problem \\(p\\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.6, + 0.476, + 0.661 + ], + "angle": 0, + "content": "To quantify the economic impact of new developments, we define the gain. 
When a new set of models \\(\\{m_t\\}\\) becomes available at time \\(t\\) (often a single model), the gain for problem \\(p\\) is the reduction it causes in the frontier cost-of-pass:" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.673, + 0.475, + 0.705 + ], + "angle": 0, + "content": "\\[\nG _ {p} \\left(\\left\\{m _ {t} \\right\\}, \\mathcal {M} _ {t - 1}\\right) = V _ {p} \\left(\\mathcal {M} _ {t - 1}\\right) - V _ {p} \\left(\\mathcal {M} _ {t - 1} \\cup \\left\\{m _ {t} \\right\\}\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.717, + 0.476, + 0.824 + ], + "angle": 0, + "content": "Note that \\( G_{p} \\) measures how much cheaper the new model(s), \\( \\{m_t\\} \\), make solving \\( p \\) compared to prior best options, including humans. Hence, a large \\( G_{p} \\) value indicates a significant economic contribution in solving \\( p \\). This notion underlies our experiments, analyzing the value generated by models relative to the human baseline and tracking the evolution of the overall frontier." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.831, + 0.478, + 0.907 + ], + "angle": 0, + "content": "Extending to a distribution. Although measuring frontier cost-of-pass and value gain for individual problems can be informative, particularly through a fine-grained perspective, we often care about more than a single instance. Let \\( P \\sim D \\) be a set of problems sampled from a problem distribution \\( D \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.116 + ], + "angle": 0, + "content": "We can then extend our definitions for such a distribution through the following:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.142, + 0.887, + 0.16 + ], + "angle": 0, + "content": "\\[\nV _ {p \\sim D} (\\mathcal {M} _ {t}) = \\mathbb {E} _ {p \\sim D} [ V _ {p} (\\mathcal {M} _ {t}) ], \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.162, + 0.887, + 0.179 + ], + "angle": 0, + "content": "\\[\nG _ {p \\sim D} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) = \\mathbb {E} _ {p \\sim D} [ G _ {p} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) ]. \\tag {10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.212, + 0.78, + 0.227 + ], + "angle": 0, + "content": "2.6. Estimating the Economic Efficiency" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.235, + 0.888, + 0.267 + ], + "angle": 0, + "content": "To operationalize our overall framework for any given distribution of problems, we introduce the following recipe:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.273, + 0.889, + 0.349 + ], + "angle": 0, + "content": "(1) Estimate success rates. For each model-problem pair \\((m,p)\\), generate a number of independent attempts to approximate \\(R_{m}(p)\\). We use the same prompt and model settings across these attempts, varying only factors necessary to ensure independence (e.g., internal sampling randomness)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.356, + 0.889, + 0.448 + ], + "angle": 0, + "content": "(2) Estimate per-attempt cost. Track the average number of tokens (prompt + generation) consumed per attempt, multiply by the current token price (which can differ by model provider or usage level), and add any extra charges (e.g., third-party API calls, external reasoning modules, etc.). This sum yields \\( C_m(p) \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.497, + 0.453, + 0.887, + 0.5 + ], + "angle": 0, + "content": "(3) Compute cost-of-pass. For each model \\( m \\), calculate \\( v(m, p) = C_m(p) / R_m(p) \\). (\\( R_m(p) = 0 \\) yields \\( v(m, p) = \\infty \\).)" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.507, + 0.888, + 0.552 + ], + "angle": 0, + "content": "(4) Determine frontier cost-of-pass. Estimate human-expert cost \\( v(\\text{expert}, p) \\) (see below). Find \\( V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0}) \\) for a given set of strategies \\( \\mathcal{M} \\)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.56, + 0.887, + 0.606 + ], + "angle": 0, + "content": "(5) Analyze over benchmarks. Aggregate \\( V_{p}(\\mathcal{M}) \\) across problems \\( p \\sim D \\) to get \\( V_{p \\sim D}(\\mathcal{M}_t) \\). Track progress over time (for \\( \\mathcal{M}_t \\)) and compute gain \\( G_{p \\sim D} \\) for new models." + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.273, + 0.889, + 0.606 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.62, + 0.766, + 0.635 + ], + "angle": 0, + "content": "2.6.1. Estimating Human-Expert Cost" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.643, + 0.889, + 0.871 + ], + "angle": 0, + "content": "To estimate \\( v(\\text{expert}, p) \\), the plausible cost of obtaining a correct human-expert answer, we systematically determine the required qualifications, appropriate hourly compensation, and average time for a typical problem \\( p \\) per dataset. We determine these quantities based on a hierarchy of evidence by prioritizing the dataset's creation process or associated studies (e.g., reported annotation pay/time (Parrish et al., 2022)). When direct data is absent, we leverage findings from closely related work (Zhang et al., 2024) or infer parameters from the dataset's context (e.g., deriving time-per-problem from contest rules (Art of Problem Solving, 2023)). 
Compensation rates are informed by reported study payments (Rein, 2024) or relevant market rates for comparable expertise (e.g., specialized tutoring rates (TutorCruncher, 2025; Wyzant Tutoring, 2025)).1" + }, + { + "type": "page_footnote", + "bbox": [ + 0.497, + 0.879, + 0.888, + 0.907 + ], + "angle": 0, + "content": "1The full derivation, justification, and sources for our approach are detailed in Appendix A. The resulting estimates are in Table 3." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.081, + 0.803, + 0.32 + ], + "angle": 0, + "content": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.8e-50.192.7e-218.583.3815.33
GPT-4o mini5.4e-50.221.3e-225.382.0614.67
Llama-3.3-70B1.6e-40.167.4e-318.581.3110.67
Large Models
Llama-3.1-405B6.9e-40.146.7e-310.431.138.67
Claude Sonnet-3.52.1e-30.196.4e-314.062.5414.67
GPT-4o2.3e-30.176.2e-314.070.9614.01
Reasoning Models
OpenAI o1-mini5.4e-30.171.3e-212.270.504.80
OpenAI o11.9e-20.224.3e-28.070.902.85
DeepSeek-R11.8e-30.171.5e-214.570.213.41
OpenAI o3-mini1.1e-30.111.1e-28.180.762.03
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.328, + 0.89, + 0.369 + ], + "angle": 0, + "content": "Table 1: Frontier dollar cost-of-pass per model / dataset. Each entry is the expected dollar cost of a problem \\( p \\sim D \\) with the presence of the model \\( m \\) and a human expert: \\( V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0) \\). Per column, the 3 entries with the lowest value (i.e. best frontier cost-of-pass) have blue highlights. Different model families emerge as cost-effective at different task categories, highlighting the strengths of our evaluation." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.375, + 0.22, + 0.392 + ], + "angle": 0, + "content": "3. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.401, + 0.266, + 0.415 + ], + "angle": 0, + "content": "3.1. Models and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.424, + 0.414, + 0.44 + ], + "angle": 0, + "content": "Models. We consider three categories of models:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.447, + 0.477, + 0.523 + ], + "angle": 0, + "content": "(1) Lightweight models: We use the per-token cost as a proxy and select models with a cost less than $1 per million input and output tokens (see Table 4): Llama-3.1-8B (Grattafori et al., 2024), GPT-4o mini (OpenAI, 2024), and Llama-3.3-70B (Meta-AI, 2024)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.53, + 0.476, + 0.576 + ], + "angle": 0, + "content": "(2) Large models: We select large general-purpose LMs: Llama-3.1-405B (Grattafiori et al., 2024), Claude Sonnet-3.5 (Anthropic, 2024), and GPT-4o (Hurst et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.583, + 0.477, + 0.645 + ], + "angle": 0, + "content": "(3) Reasoning models: We select models with special reasoning post-training, including OpenAI's o1-mini (OpenAI et al., 2024), o1 (OpenAI et al., 2024), and o3-mini (OpenAI, 2025), as well as DeepSeek R1 (Guo et al., 2025)." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.447, + 0.477, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.651, + 0.475, + 0.727 + ], + "angle": 0, + "content": "Within each category, we select three to four representative models released between the second half of 2024 and early 2025. To preserve the integrity of our temporal analysis, we prioritize the earliest stable releases and exclude research previews or experimental versions." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.734, + 0.458, + 0.749 + ], + "angle": 0, + "content": "Datasets. We evaluate models across three sets of tasks:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.757, + 0.476, + 0.832 + ], + "angle": 0, + "content": "(1) Basic quantitative tasks: These involve basic numerical reasoning. We include an arithmetic dataset (Two Digit Addition) to assess basic numerical computation, and GSM8K (Cobbe et al., 2021) to evaluate multi-step grade-school level problem solving." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.839, + 0.476, + 0.9 + ], + "angle": 0, + "content": "(2) Knowledge-based tasks: These require recalling and reasoning over factual knowledge. We include a scientific knowledge-intensive question answering task (GPQA-Diamond (Rein et al., 2024)) to evaluate models' abl" + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.757, + 0.476, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.376, + 0.887, + 0.438 + ], + "angle": 0, + "content": "ity to recall and utilize complex scientific facts, and a bias benchmark (BBQ (Parrish et al., 2022)) to evaluate whether models rely on stereotypical knowledge or can disambiguate factual responses from biased defaults." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.444, + 0.888, + 0.55 + ], + "angle": 0, + "content": "(3) Complex quantitative reasoning tasks: These require complex mathematical reasoning and problem solving. 
We use MATH-500 (Hendrycks et al., 2021; Lightman et al., 2023) to assess models on competition-level maths problems, and AIME24 (MAA, 2024) to evaluate performance on challenging problems from the 2024 American Invitational Mathematics Examination." + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.376, + 0.888, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.567, + 0.825, + 0.582 + ], + "angle": 0, + "content": "3.2. Frontier Cost-of-Pass with a Single Model" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.591, + 0.888, + 0.669 + ], + "angle": 0, + "content": "In this experiment, we aim to quantify the economic value each model \\(m\\) generates on different distributions of problems \\(p \\sim D\\). For this, we take human-expert as a baseline and quantify the frontier cost-of-pass of a problem in the presence of the model \\(m\\): \\(V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0)\\)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.673, + 0.889, + 0.901 + ], + "angle": 0, + "content": "The results in Table 1, highlighting the top three costs, show that our frontier cost-of-pass effectively captures how different model families offer economic advantages across various task categories. We find that lightweight models yield the lowest frontier cost-of-pass on basic quantitative tasks, such as Two Digit Addition. This is expected, as all model families achieve high accuracy on this dataset, making the least expensive models the most cost-effective. In contrast, for knowledge-based tasks, larger models achieve a lower frontier cost-of-pass compared to lightweight ones. While the reasoning models, such as o1, are priced significantly more expensively compared to both large and lightweight models, they lead to significant performance improvements, which, overall, result in reductions in the cost-of-pass mainly in complex quantitative tasks." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "image_caption", + "bbox": [ + 0.149, + 0.089, + 0.285, + 0.104 + ], + "angle": 0, + "content": "Two Digit Addition" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.104, + 0.349, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.089, + 0.51, + 0.103 + ], + "angle": 0, + "content": "BBQ" + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.104, + 0.621, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.728, + 0.089, + 0.8, + 0.103 + ], + "angle": 0, + "content": "MATH500" + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.104, + 0.875, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.189, + 0.258, + 0.246, + 0.271 + ], + "angle": 0, + "content": "GSM8K" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.272, + 0.331, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.259, + 0.55, + 0.272 + ], + "angle": 0, + "content": "GPQA Diamond" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.274, + 0.622, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.724, + 0.259, + 0.804, + 0.272 + ], + "angle": 0, + "content": "AIME 2024" + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.272, + 0.878, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.424, + 0.887, + 0.452 + ], + "angle": 0, + "content": "Figure 2: The frontier dollar cost-of-pass (i.e. 
\\( V_{p\\sim D}(\\mathcal{M}_t) \\) steadily decreases with new model releases, spanning models released between May 2024 and February 2025. Y-axes are normalized (divided by \\( V_{p\\sim D}(\\mathcal{M}_0) \\), shown in percentage (%))." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.463, + 0.477, + 0.569 + ], + "angle": 0, + "content": "In contrast, when either task performance \\((R_{m}(p\\sim D))\\) or cost \\((C_m(p\\sim D)\\) is solely taken into account (Tables 5 and 6) such metrics tend to favor either reasoning models or lightweight models respectively due to their significant edge per criteria, without assessing the nuances in the economic impact they induce. This effectively highlights the sophistication of our metric and evaluation framework." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.585, + 0.471, + 0.601 + ], + "angle": 0, + "content": "3.3. Tracking Frontier Cost-of-Pass with New Releases" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.609, + 0.477, + 0.76 + ], + "angle": 0, + "content": "In this experiment, we track the improvements on the frontier cost-of-pass for a problem. Figure 2 shows the trends of the cumulative gain per dataset \\((V_{p\\sim D}(\\mathcal{M}_t))\\), each updated by the corresponding model release \\((\\mathcal{M}_{t - 1}\\cup \\{m_t\\})\\). We observe a steady decline in the frontier cost-of-pass for complex quantitative tasks. In contrast, knowledge-based and basic quantitative tasks typically exhibit a sharp initial drop in frontier cost-of-pass with the early releases of models, followed by a plateau. 
To quantify the cost reduction trends, we empirically fit an exponential decay curve of the form:" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.771, + 0.475, + 0.79 + ], + "angle": 0, + "content": "\\[\nV _ {p} \\left(M _ {t}\\right) \\approx a e ^ {- b t} + c, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.8, + 0.476, + 0.906 + ], + "angle": 0, + "content": "where \\( t \\) denotes time in months since the first model release, and \\( a \\), \\( b \\), and \\( c \\) are fit parameters. From this, we compute the time for the exponential component of the cost to drop by \\( 50\\% \\): \\( T_{1/2} = \\ln(2)/b \\). Using this formulation, we find that for complex quantitative tasks, between May 2024 and February 2025, the frontier cost-of-pass for MATH500 halved approximately every 2.6 months, whereas for AIME" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.463, + 0.889, + 0.494 + ], + "angle": 0, + "content": "2024, the halving time was 7.1 months—indicating consistent cost reductions over the past year." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.51, + 0.869, + 0.54 + ], + "angle": 0, + "content": "3.4. Essentialness of Model Families: Counterfactual Frontier Cost-of-Pass" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.549, + 0.888, + 0.683 + ], + "angle": 0, + "content": "Section 3.3 showed the frontier cost-of-pass decreasing over time with new model releases. To understand which model families were most critical to this progress, we conduct a counterfactual analysis that quantifies the impact of removing each family. 
Defining \\(\\mathcal{M}_g\\) as a family of models (lightweight, large, or reasoning), we measure the counterfactual contribution of family \\(g\\) on dataset \\(D\\) by calculating the relative improvement in frontier cost-of-pass attributable to its inclusion:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.691, + 0.887, + 0.725 + ], + "angle": 0, + "content": "\\[\n\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {g} , \\mathcal {M} _ {T} \\backslash \\mathcal {M} _ {g}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T}\\right)}. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.733, + 0.888, + 0.809 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{M}_T\\) includes all models used in our experiments. This metric represents the relative improvement in the final frontier cost-of-pass \\(V_{p\\sim D}(\\mathcal{M}_T)\\) attributable to the model family \\(\\mathcal{M}_g\\), with higher values indicating greater essentialness of that family for achieving the current frontier." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.815, + 0.889, + 0.907 + ], + "angle": 0, + "content": "Figure 3 illustrates our main findings, revealing distinct roles across model families. Lightweight models help reduce the frontier cost-of-pass on basic quantitative tasks, while large models drive performance on knowledge-intensive tasks. 
Reasoning models play a key role in advancing the frontier for complex quantitative reasoning and also improve" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.085, + 0.795, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.264, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Figure 3: The relative improvement \\((\\%)\\) in frontier cost-of-pass attributable to each model family \\(g\\), calculated under a counterfactual setting where \\(\\mathcal{M}_g\\) is removed. Higher values signify greater essentialness for maintaining the current frontier." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.304, + 0.855, + 0.391 + ], + "angle": 0, + "content": "
Inference Time TechniqueBasic QuantitativeKnowledge BasedComplex Quantitative
Two Digit AdditionGSM8KBBQGPQA DiamondMATH500AIME24
Self-Refine006.724.900
Maj. Vote (k=3)000000
Maj. Vote (k=4)000000
" + }, + { + "type": "table_caption", + "bbox": [ + 0.195, + 0.398, + 0.776, + 0.413 + ], + "angle": 0, + "content": "Table 2: Relative performance gains (%) from different inference time techniques across datasets." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.421, + 0.475, + 0.452 + ], + "angle": 0, + "content": "performance on GPQA-Diamond, as well as GSM8K, which benefits from small reasoning models like o3-mini." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.459, + 0.477, + 0.595 + ], + "angle": 0, + "content": "These findings highlight that progress on different task types is driven by different model paradigms. While large models have brought clear gains on knowledge-intensive tasks (e.g., GPQA), recent improvements in cost-efficiency—especially in more quantitative domains—appear largely driven by advances in lightweight and reasoning models. Together, these results suggest that the current cost-efficiency frontier, as reflected in our framework, is shaped mainly by (i) lightweight models and (ii) reasoning models." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.611, + 0.466, + 0.641 + ], + "angle": 0, + "content": "3.5. Impact of Inference Time Techniques on Frontier Cost-of-Pass" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.65, + 0.476, + 0.742 + ], + "angle": 0, + "content": "We now assess whether common inference-time techniques provide meaningful economic benefits. Specifically, we ask: is it cost-effective to improve model performance through these techniques, compared to relying on the models' baseline performance? To explore this, we focus on the set of lightweight and large models, denoted by \\(\\mathcal{M}_L\\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.748, + 0.476, + 0.839 + ], + "angle": 0, + "content": "First, we determine the frontier cost-of-pass achieved by \\(\\mathcal{M}_L\\) without any modifications. 
We then apply a given inference-time technique uniformly across all models in \\(\\mathcal{M}_L\\), yielding a modified set \\(\\mathcal{M}_L^*\\). The gain from this technique, measured relative to the original frontier cost-of-pass, can be computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.211, + 0.849, + 0.476, + 0.883 + ], + "angle": 0, + "content": "\\[\n\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {L} ^ {*} , \\mathcal {M} _ {L}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {L}\\right)}. \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.891, + 0.478, + 0.907 + ], + "angle": 0, + "content": "In this study, we consider two popular techniques: self-" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.421, + 0.888, + 0.452 + ], + "angle": 0, + "content": "refinement (Madaan et al., 2023) and majority voting (a.k.a. self-consistency; Wang et al., 2023), with 3 and 4 votes." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.459, + 0.889, + 0.548 + ], + "angle": 0, + "content": "As shown in Table 2, self-refinement shows moderate economic benefit on knowledge-intensive tasks, with a notable \\(24.9\\%\\) improvement on GPQA Diamond. In contrast, majority voting—despite potentially enhancing raw accuracy—does not offer relative economic improvement across the tested models and datasets." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.557, + 0.888, + 0.662 + ], + "angle": 0, + "content": "Collectively, these findings suggest, at least for the evaluated techniques, that the increased computational costs generally outweigh the performance benefits relative to the frontier cost-of-pass established by the baseline models. This implies that these common inference-time approaches may not be sufficient on their own to yield significant economic benefits within our evaluation framework for now." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.682, + 0.649, + 0.699 + ], + "angle": 0, + "content": "4. 
Related Works" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.708, + 0.886, + 0.83 + ], + "angle": 0, + "content": "Economic perspectives and broader impacts. The efficiency of LMs carries significant economic implications, as they are viewed as general-purpose technologies impacting productivity and labor (Eloundou et al., 2024; Brynjolfsson et al., 2025). Complementary economic analyses explore provider strategies regarding pricing and product design Bergemann et al. (2025), and user-side decision-making involving ROI, token costs, and success probabilities." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.837, + 0.888, + 0.898 + ], + "angle": 0, + "content": "Our cost-of-pass metric serves as a crucial bridge between these technical realities of model performance and their economic consequences. By providing a fundamental measure, the expected monetary cost to successfully complete" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.148 + ], + "angle": 0, + "content": "a task, it allows for quantifying the economic contribution of specific AI systems and informs rational model selection for achieving economic viability, and provides quantitative perspective on the economic evolution of the LM ecosystem." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.153, + 0.478, + 0.32 + ], + "angle": 0, + "content": "LM resource consumption, efficiency optimization and benchmarking. Research increasingly recognizes the importance of LM resource consumption and efficiency. 
Studies have quantified operational costs like tokens (Chen et al., 2023) and energy (Maliakel et al., 2025), revealing task-dependent performance and potential diminishing returns from high expenditure (Miserendino et al., 2025). This focus has intensified with the rise of reasoning methodologies (Sui et al., 2025) and inference-time techniques (e.g., Madaan et al. (2023); Wang et al. (2023)), which often trade increased computational cost for potential accuracy gains." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.327, + 0.476, + 0.465 + ], + "angle": 0, + "content": "Concerns like \"overthinking,\" where lengthy processing fails to improve results (Chen et al., 2024; Cuadron et al., 2025), have spurred efforts to optimize resource use through methods like dynamic token budgeting (Han et al., 2025), specialized training (Arora & Zanette, 2025), prompt engineering (Xu et al., 2025; Aytes et al., 2025) or researching optimal reasoning lengths (Wu et al., 2025; Yang et al., 2025). Concurrently, evaluation methodologies have evolved beyond pure accuracy or correctness measures." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.47, + 0.478, + 0.637 + ], + "angle": 0, + "content": "Recognizing its insufficiency, researchers have incorporated cost via fixed budgets (Wang et al., 2024), performance heuristics (McDonald et al., 2024), or non-monetary metrics like conciseness (Nayab et al., 2024). Kapoor et al. (2024) strongly advocated for using real dollar costs and accounting for stochasticity—factors central to our approach. Benchmarking efforts have also highlighted diminishing returns from simply scaling inference computation (Parashar et al., 2025). While these works underscore the need for cost-aware analysis, they often rely on specific constraints (e.g., fixed budgets) or heuristic metrics." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.644, + 0.478, + 0.721 + ], + "angle": 0, + "content": "Our cost-of-pass framework seeks to advance this by providing a single, interpretable metric grounded in economic production principles, offering a unified way to assess the economic viability of different models and techniques without predefined budget assumptions or proxy metrics." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.739, + 0.206, + 0.755 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.765, + 0.478, + 0.903 + ], + "angle": 0, + "content": "We introduced an economic framework designed to evaluate language models by integrating their performance with inference cost. Drawing from production theory, we conceptualize language models as stochastic producers, and assess their efficiency using our proposed cost-of-pass metric, which measures the expected cost per correct solution. Our analysis utilizes this metric alongside the frontier cost-of-pass, defined as the minimum achievable cost compared to an human expert baseline. This approach reveals distinct" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.888, + 0.283 + ], + "angle": 0, + "content": "economic roles played by different model classes. For instance, retrospective and counterfactual evaluations demonstrate that lightweight models primarily drive efficiency on basic tasks, whereas reasoning models are essential for complex problem-solving. Critically, our findings show that common inference-time techniques typically increase the cost-of-pass, thus failing to provide net economic benefits when compared to the progress made by improving the underlying models themselves. In conclusion, our framework offers a principled foundation for measuring language model innovation in economic terms. It serves as a valuable tool for guiding model selection and aligning AI development with real-world value." 
+ }, + { + "type": "title", + "bbox": [ + 0.499, + 0.301, + 0.658, + 0.318 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.327, + 0.89, + 0.449 + ], + "angle": 0, + "content": "We thank Federico Bianchi, Dan Jurafsky, Daniel E. Ho, Can Yesildere, and Semyon Lomasov for valuable comments and discussions in the early stages of this project. MHE gratefully acknowledges support from the Fulbright Foreign Student Program. BE gratefully acknowledges the support of the Stanford Knight-Hennessy Scholarship. MS gratefully acknowledges the support of an HAI-SAP Fellowship." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.467, + 0.596, + 0.483 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.491, + 0.889, + 0.553 + ], + "angle": 0, + "content": "1st grade 4th quarter expectations – fast facts timed tests. Elementary School Curriculum Note (online PDF), 2021. States 20–25 addition problems should be solved in 1 minute (2–3 sec each) (Fas, 2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.562, + 0.889, + 0.623 + ], + "angle": 0, + "content": "Daron Acemoglu. The Simple Macroeconomics of AI. NBER Working Papers 32487, National Bureau of Economic Research, Inc, May 2024. URL https://ideas.repec.org/p/nbr/nberwo/32487.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.633, + 0.889, + 0.738 + ], + "angle": 0, + "content": "Dennis Aigner, C.A.Knox Lovell, and Peter Schmidt. Formulation and estimation of stochastic frontier production function models. Journal of Econometrics, 6(1):21-37, 1977. ISSN 0304-4076. doi: https://doi.org/10.1016/0304-4076(77)90052-5. URL https://www.sciencedirect.com/science/article/pii/0304407677900525." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.75, + 0.888, + 0.794 + ], + "angle": 0, + "content": "Anthropic. Claude 3.5 sonnet announcement, 2024. URL https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 13 Feb. 
2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.805, + 0.889, + 0.85 + ], + "angle": 0, + "content": "Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.861, + 0.889, + 0.907 + ], + "angle": 0, + "content": "Art of Problem Solving. American Invitational Mathematics Examination (AIME) Format. AoPS Wiki (aops.com), 2023. States AIME is 15 questions in 3 hours (12 min" + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.491, + 0.889, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.719, + 0.072 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.086, + 0.474, + 0.115 + ], + "angle": 0, + "content": "per problem) (Art of Problem Solving, 2023). Accessed Mar 25, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.126, + 0.476, + 0.185 + ], + "angle": 0, + "content": "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.197, + 0.476, + 0.256 + ], + "angle": 0, + "content": "Dirk Bergemann, Alessandro Bonatti, and Alex Smolin. The economics of large language models: Token allocation, fine-tuning, and optimal pricing. arXiv preprint arXiv:2502.07736, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.267, + 0.476, + 0.342 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. 
URL https://arxiv.org/abs/2407.21787." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.353, + 0.476, + 0.398 + ], + "angle": 0, + "content": "Erik Brynjolfsson, Danielle Li, and Lindsey Raymond. *Generative ai at work.* The *Quarterly Journal of Economics*, pp. qjae044, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.408, + 0.476, + 0.468 + ], + "angle": 0, + "content": "Lingjiao Chen, Matei Zaharia, and James Zou. Frugalgpt: How to use large language models while reducing cost and improving performance. arXiv preprint arXiv:2305.05176, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.479, + 0.476, + 0.553 + ], + "angle": 0, + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for \\(2+\\) \\(3=\\)? on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.564, + 0.476, + 0.639 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.65, + 0.476, + 0.739 + ], + "angle": 0, + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.75, + 0.476, + 0.795 + ], + "angle": 0, + "content": "Tyna Eloundou, Sam Manning, Pamela Mishkin, and Daniel Rock. Gpts are gpts: Labor market impact potential of llms. Science, 384(6702):1306-1308, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.806, + 0.476, + 0.85 + ], + "angle": 0, + "content": "Michael James Farrell. The measurement of productive efficiency. Journal of the royal statistical society: series A (General), 120(3):253-281, 1957." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.861, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan," + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.086, + 0.476, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.086, + 0.885, + 0.115 + ], + "angle": 0, + "content": "et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.126, + 0.887, + 0.2 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.211, + 0.887, + 0.269 + ], + "angle": 0, + "content": "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. Token-budget-aware llm reasoning, 2025. URL https://arxiv.org/abs/2412.18547." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.281, + 0.887, + 0.386 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.396, + 0.887, + 0.456 + ], + "angle": 0, + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihindra, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.466, + 0.887, + 0.511 + ], + "angle": 0, + "content": "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.521, + 0.887, + 0.596 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.606, + 0.887, + 0.665 + ], + "angle": 0, + "content": "MAA. American Invitational Mathematics Examination (AIME). https://maa.org/maa-invitational-competitions/, 2024. Accessed: 2025-03-25." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.676, + 0.887, + 0.765 + ], + "angle": 0, + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.776, + 0.887, + 0.836 + ], + "angle": 0, + "content": "Paul Joe Maliakel, Shashikant Ilager, and Ivona Brandic. Investigating energy efficiency and performance trade-offs in llm inference across tasks and dvfs settings. arXiv preprint arXiv:2501.08219, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.846, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Tyler McDonald, Anthony Colosimo, Yifeng Li, and Ali Emami. Can we afford the perfect prompt? balancing cost and accuracy with the economical prompting index. arXiv preprint arXiv:2412.01690, 2024." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.086, + 0.887, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.478, + 0.13 + ], + "angle": 0, + "content": "Meta-AI. Llama 3.3 70b instruct model, 2024. URL https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.141, + 0.478, + 0.201 + ], + "angle": 0, + "content": "Samuel Miserendino, Michele Wang, Tejal Patwardhan, and Johannes Heidecke. Swe-lancer: Can frontier llms earn $1 million from real-world freelance software engineering? arXiv preprint arXiv:2502.12115, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.212, + 0.478, + 0.286 + ], + "angle": 0, + "content": "Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicolamaria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.298, + 0.482, + 0.343 + ], + "angle": 0, + "content": "OpenAI. Gpt-4o mini: Advancing cost-efficient intelligence, 2024. URL https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.353, + 0.478, + 0.383 + ], + "angle": 0, + "content": "OpenAI. 
Openai o3-mini system card, 2025. URL https://openai.com/index/o3-mini-system-card/." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.392, + 0.478, + 0.906 + ], + "angle": 0, + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quinonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan" + }, + { + "type": 
"list", + "bbox": [ + 0.088, + 0.085, + 0.482, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.085, + 0.888, + 0.691 + ], + "angle": 0, + "content": "Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan Thibault Sottiaux Thomas Degry Thomas Dimson Tianhao Zheng Timur Garipov Tom Stasi Trapit Bansal. 
Trevor Creech Troy Peterson Tyna Eloundou Valerie Qi,Vineet Kosaraju,Vinnie Monaco,Vitchyr Pong,Vlad Fomenko Weiyi ZhengWenda ZhouWes McCabe Wojciech ZarembaYann Dubois Yinghai LuYining Chen Young ChaYu BaiYuchen He,Yuchen Zhang,Yunyun Wang,Zheng Shao,and Zhuohan Li. Openai o1 system card2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.699, + 0.888, + 0.775 + ], + "angle": 0, + "content": "Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.785, + 0.888, + 0.906 + ], + "angle": 0, + "content": "Alicia Parrish, Angelica Chen, Nikita Nangia, Vishakh Padmakumar, Jason Phang, Jana Thompson, Phu Mon Htut, and Samuel Bowman. BBQ: A hand-built bias benchmark for question answering. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 2086-2105, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.719, + 0.072 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.086, + 0.477, + 0.116 + ], + "angle": 0, + "content": "findings-acl.165. URL https://aclanthology.org/2022.findings-acl.165/." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.127, + 0.476, + 0.188 + ], + "angle": 0, + "content": "David Rein. Can good benchmarks contain mistakes? 
NYU Alignment Research Group Blog, May 2024. Reveals GPQA expert pay (\\(100/hr) and non-expert solve times (Rein, 2024). Online: wp.nyu.edu/...mistakes." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.198, + 0.476, + 0.275 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof qa benchmark. In First Conference on Language Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.285, + 0.476, + 0.345 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling lIm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.356, + 0.476, + 0.433 + ], + "angle": 0, + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.443, + 0.476, + 0.504 + ], + "angle": 0, + "content": "TutorCruncher. Average tutoring rates use: How much do tutors charge per hour? TutorCruncher Blog, Feb 2025. Reports \\(45-\\)100/hr as typical range for test-prep tutoring (TutorCruncher, 2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.514, + 0.476, + 0.576 + ], + "angle": 0, + "content": "Upwork. Data entry specialist hourly rates (cost to hire data entry specialist). Upwork Hiring Guide, 2025. Median \\(13/hr for data entry freelancers; \\)10–$20/hr typical range (Upwork, 2025). Accessed Mar 25, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.586, + 0.476, + 0.647 + ], + "angle": 0, + "content": "Junlin Wang, Siddhartha Jain, Dejiao Zhang, Baishakhi Ray, Varun Kumar, and Ben Athiwaratkun. Reasoning in token economies: Budget-aware evaluation of llm reasoning strategies. arXiv preprint arXiv:2406.06461, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.657, + 0.476, + 0.733 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.744, + 0.476, + 0.835 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.845, + 0.476, + 0.905 + ], + "angle": 0, + "content": "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms. arXiv preprint arXiv:2502.07266, 2025." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.086, + 0.477, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.16 + ], + "angle": 0, + "content": "Wyzant Tutoring. New jersey math tutors cost \\(33 - \\)55 per hour on average. Wyzant.com (tutoring rate listing), 2025. Average private tutoring rates for math (K-12 and competition) (Wyzant Tutoring, 2025). Accessed Mar 25, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.171, + 0.886, + 0.217 + ], + "angle": 0, + "content": "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. 
Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.226, + 0.886, + 0.272 + ], + "angle": 0, + "content": "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for ltm reasoning. arXiv preprint arXiv:2502.18080, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.282, + 0.886, + 0.417 + ], + "angle": 0, + "content": "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. In NeurIPS 2024 Datasets and Benchmarks Track, 2024. Reports human solve rate on GSM8K: 4 problems/15 min (3.7 min each) (Zhang et al., 2024)." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.465, + 0.102 + ], + "angle": 0, + "content": "A. Details of Human Expert Cost Estimation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.11, + 0.477, + 0.351 + ], + "angle": 0, + "content": "In this section, we introduce the detailed analysis of how the human expert costs in Table 3 are calculated per dataset. AIME (American Invitational Mathematics Examination) consists of 15 challenging math problems in a 3-hour contest (administered in two separate sections: AIME I & II), giving an average of about 12 minutes per problem (Art of Problem Solving, 2023). 
In practice, expert math tutors for competitions like AIME command high hourly fees in the range of \\(45 - \\)100, reflecting intensive test-preparation rates (TutorCruncher, 2025). This rate range aligns with specialized test prep tutoring in the US, which is higher than regular tutoring due to the advanced problem-solving skills required (TutorCruncher, 2025). At roughly 12 minutes per AIME question on average, a solver could handle about five such problems per hour under exam conditions (Art of Problem Solving, 2023)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.359, + 0.478, + 0.541 + ], + "angle": 0, + "content": "BBQ (Bias Benchmark for QA) contains short question-answer scenarios targeting social bias. Crowdworkers annotating BBQ have been paid around $15 per hour, a rate chosen to exceed U.S. minimum wage (Parrish et al., 2022). Because each task includes multiple BBQ questions, workers were able to answer roughly 5 questions in 2 minutes (Parrish et al., 2022) - i.e. ~24 seconds per question, or about 0.4 minutes per question. This fast per-question time reflects the fact that BBQ items are short multiple-choice queries, allowing a human annotator to complete approximately 150 BBQ questions in an hour at that pay rate (Parrish et al., 2022)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.548, + 0.476, + 0.729 + ], + "angle": 0, + "content": "GPQA Diamond consists of extremely difficult graduate-level science questions, so human experts demand high compensation. In one case, domain experts were paid about \\(100 per hour to contribute and validate GPQA questions (Rein et al., 2024). These questions are \"Google-proof\" and time-consuming: skilled non-expert participants spent over 30-35 minutes on average per question when attempting to solve GPQA problems with unrestricted web access (Rein et al., 2024). 
This long duration per question underscores GPQA's complexity – at most 2 questions could be solved in an hour even by motivated annotators, which justifies the premium expert hourly rate (Rein, 2024)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.736, + 0.476, + 0.903 + ], + "angle": 0, + "content": "GSM8K contains grade-school level math word problems. Solving these is relatively time-efficient for adults: in one study, crowdworkers under time pressure managed to solve about 4.07 GSM8K problems in 15 minutes on average (Zhang et al., 2024). That corresponds to roughly 3.7 minutes per question for a human solver. The required skill is comparable to general math tutoring at the K-8 level, for which typical U.S. tutor rates are about \\(33 - \\)55 per hour on platforms like Wyzant (Wyzant Tutoring, 2025). At such a rate, paying a person to solve GSM8K problems would be economical, given that a proficient solver can complete" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.888, + 0.102 + ], + "angle": 0, + "content": "approximately 16 questions in one hour (Zhang et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.108, + 0.889, + 0.32 + ], + "angle": 0, + "content": "MATH500 is a set of 500 advanced competition math problems (drawn from the harder tier of a larger MATH dataset). These problems are similar in difficulty to top-level contest questions such as late AIME or Olympiad qualifying problems. As with AIME, a well-prepared human might spend on the order of 10-15 minutes per problem, roughly \\(\\sim\\)12 minutes on average for a hard competition question (Art of Problem Solving, 2023). Tutors capable of solving and teaching such Olympiad-level problems often charge rates on the order of \\(50 per hour (with a typical range of \\)35- $60 for competition math tutoring) (Wyzant Tutoring, 2025). 
This implies that solving roughly five MATH500 problems could cost about $50 and take around an hour, consistent with the per-question time and high skill required." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.327, + 0.89, + 0.523 + ], + "angle": 0, + "content": "Two-Digit Addition consists of simple two-digit addition problems, which are very quick for humans to solve. Early elementary students are often expected to complete about 20-25 basic addition problems in one minute in \"mad minute\" drills (Fas, 2021). This corresponds to roughly 2-3 seconds per addition (0.04 minutes per question). Because the task is so elementary, the labor to solve large numbers of such problems can be valued at a lower hourly rate. Simple data-entry style work or basic math tasks on freelance platforms pay on the order of \\(10 - \\)20 per hour (Upwork, 2025). At $15/hour, for example, a worker could theoretically solve several hundred 2-digit additions within the hour, given the ~3-second average solution time (Fas, 2021)." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.543, + 0.702, + 0.558 + ], + "angle": 0, + "content": "B. Details of Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.569, + 0.888, + 0.719 + ], + "angle": 0, + "content": "For each dataset in our evaluation, we sample up to 128 instances and run each model \\( n = 8 \\) times to estimate the expected runtime cost and accuracy per sample. For all models except OpenAI's reasoning models, we set the temperature to 0.7 and top_p to 1.0. In the case of OpenAI's reasoning models, we use a temperature of 1.0 and do not apply top_p. Additionally, we use the default maximum token generation limits provided by each model. Per sample, we employ a concise but descriptive instruction prompt for the models to follow." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.728, + 0.887, + 0.773 + ], + "angle": 0, + "content": "In our experiments, we define the pass \\( r_m(p) \\) as whether the model obtains a correct answer after a single run or not (0 or 1), and the cost \\( c_m(p) \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.779, + 0.887, + 0.796 + ], + "angle": 0, + "content": "\\[\nc _ {m} (p) = n _ {\\text {i n}} (m, p) \\cdot c _ {\\text {i n}} (m) + n _ {\\text {o u t}} (m, p) \\cdot c _ {\\text {o u t}} (m) \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.801, + 0.887, + 0.906 + ], + "angle": 0, + "content": "where \\( n_{*}(m,p) \\) denotes the number of input / output tokens consumed / generated by the model \\( m \\) on problem \\( p \\), and \\( c_{*}(m) \\) denotes the dollar costs per input / output tokens consumed / generated by the model \\( m \\) (see Table 4 for the pricing). For the expert costs, we utilize the estimations from Table 3, and set the rates to the upper-bound value to ensure the approximation of the expert accuracy being 1." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.082, + 0.875, + 0.235 + ], + "angle": 0, + "content": "
DatasetQualification RequirementsHourly RateTime per QuestionEst. Cost
AIMEAdvanced high-school contest math skills$45–$100~12 minutes$9–$20
BBQGeneral familiarity with social biases$15~0.4 minutes (24 sec)$0.10
GPQA Dia.Graduate-level domain expertise$100~35 minutes$58
GSM8KBasic arithmetic reasoning$33–$55~3.7 minutes$2–$3.50
MATH500Strong competition-level problem-solving$35–$60~12 minutes$7–$12
Two-Digit Add.Basic numeracy$10–$20~0.04 minutes (3 sec)$0.01–$0.02
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.245, + 0.888, + 0.273 + ], + "angle": 0, + "content": "Table 3: Estimated costs of hiring a human expert to solve one question from each dataset, based on typical qualifications, hourly rates, and time per question." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.281, + 0.242, + 0.296 + ], + "angle": 0, + "content": "Experiment Prompt" + }, + { + "type": "code", + "bbox": [ + 0.094, + 0.3, + 0.468, + 0.613 + ], + "angle": 0, + "content": "Please solve the following question. You can explain your solution before presenting the final answer. Format your final answer as: ... Instructions: - For multiple-choice: Give only the letter (e.g., (A)). - For numeric: Give only the number (e.g., 42). - For free-response: Provide the full final answer text. INPUT: , , {input} ," + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.64, + 0.271, + 0.656 + ], + "angle": 0, + "content": "C. Additional Results" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.666, + 0.402, + 0.682 + ], + "angle": 0, + "content": "C.1. Expected Accuracy and Inference Costs" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.69, + 0.477, + 0.766 + ], + "angle": 0, + "content": "As discussed in the Section 3.2, we share the results of expected cost and accuracy per model per dataset. We can observe the skewed preference of a particular model family under each metric, implying the inability of expressing economic impact of models through these metrics solely." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.782, + 0.353, + 0.797 + ], + "angle": 0, + "content": "C.2. Relative Gain per Model Release" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.805, + 0.476, + 0.852 + ], + "angle": 0, + "content": "Figure 4 presents the relative improvement in temporal frontier cost-of-pass for each model release, illustrated using bar plots. 
Namely, we calculate:" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.87, + 0.476, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {t} \\right\\} , \\mathcal {M} _ {t - 1}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {t - 1}\\right)} \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.281, + 0.887, + 0.417 + ], + "angle": 0, + "content": "The results indicate that the reasoning models demonstrate notable advancements, particularly on complex quantitative tasks. In contrast, lightweight models exhibit marked gains on basic tasks. These findings support the observations from our experiments (Sections 3.2, 3.4). Notably, The substantial improvement observed for GPT-4o is likely due to it being the first model included in our analysis, resulting in a pronounced leap relative to the baseline cost associated with human expert annotation." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.433, + 0.839, + 0.464 + ], + "angle": 0, + "content": "C.3. Counterfactual Frontier Cost-of-Pass in the Absence of a Single Model" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.472, + 0.889, + 0.532 + ], + "angle": 0, + "content": "In this section, following the methodology outlined in Section 3.4, we quantify the relative improvement in frontier cost-of-pass using a counterfactual approach. Specifically, for each model \\(m_{*}\\), we calculate the following:" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.539, + 0.887, + 0.574 + ], + "angle": 0, + "content": "\\[\n\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {*} \\right\\} , \\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}, \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.579, + 0.888, + 0.685 + ], + "angle": 0, + "content": "quantifying the essentialness of the model \\( m_* \\). 
The results presented in Figure 5 demonstrate that the contributions of most individual models are largely compensable by the remaining models. Furthermore, we observe a similar coarse-level trend, as noted in Section 3.4, indicating that different model families provide greater benefits in specific task categories." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.704, + 0.887, + 0.738 + ], + "angle": 0, + "content": "D. Limitations of Our Framework and Future Work Directions" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.748, + 0.888, + 0.792 + ], + "angle": 0, + "content": "In this section, we acknowledge the limitations of the presented framework and propose directions for future improvements and extensions." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.8, + 0.889, + 0.907 + ], + "angle": 0, + "content": "A primary limitation pertains to our definitions and computations of cost \\((C_p(m))\\) and performance \\((R_{p}(m))\\). Specifically, our current cost computation considers only input and output token costs as proxies for the total expense incurred in obtaining correct outputs. This approach neglects indirect or overhead costs associated with generating incorrect outputs, such as subsequent verification costs. Regarding per" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.058, + 0.718, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.082, + 0.782, + 0.27 + ], + "angle": 0, + "content": "
CategoryModelRelease DateCost (per million tokens)
Input TokensOutput Tokens
Lightweight ModelsLlama-3.1-8B7/23/2024$0.18$0.18
GPT-4o Mini7/18/2024$0.15$0.60
Llama-3.3-70B12/6/2024$0.88$0.88
Large ModelsLlama-3.1-405B7/23/2024$3.50$3.50
GPT-4o5/13/2024$2.50$10.00
Claude Sonnet-3.56/20/2024$3.00$15.00
Reasoning ModelsOpenAI o1-mini9/12/2024$1.10$4.40
OpenAI o3-mini1/31/2025$1.10$4.40
DeepSeek-R11/20/2025$7.00$7.00
OpenAI o112/5/2024$15.00$60.00
" + }, + { + "type": "table_caption", + "bbox": [ + 0.327, + 0.279, + 0.645, + 0.292 + ], + "angle": 0, + "content": "Table 4: Per-token inference costs with release dates." + }, + { + "type": "table", + "bbox": [ + 0.149, + 0.306, + 0.825, + 0.569 + ], + "angle": 0, + "content": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B89.4575.7821.4817.8737.3012.50
GPT-4o mini99.9088.5753.3218.0770.0214.58
Llama-3.3-70B99.9092.0985.0646.4872.7533.33
Large Models
Llama-3.1-405B99.7193.9585.7444.1467.8731.67
Claude Sonnet-3.5100.0094.4392.5855.3764.7515.83
GPT-4o99.7191.9990.0447.0773.1414.58
Reasoning Models
OpenAI o1-mini99.5192.5885.7449.1285.9453.33
OpenAI o1100.0094.0495.0273.8389.4572.50
DeepSeek-R1100.0093.3683.6954.8893.8560.83
OpenAI o3-mini100.0092.7783.7971.6888.5777.08
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.578, + 0.885, + 0.594 + ], + "angle": 0, + "content": "Table 5: Accuracy (%) per model per dataset: \\( {R}_{m}\\left( {p \\sim D}\\right) \\) . In each column,the 3 entries with the highest accuracy have blue highlights." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.607, + 0.83, + 0.868 + ], + "angle": 0, + "content": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.2e-57.4e-55.2e-51.8e-41.5e-42.2e-4
GPT-4o mini5.4e-51.9e-41.0e-43.9e-43.7e-45.6e-4
Llama-3.3-70B1.6e-43.3e-43.1e-49.6e-46.7e-41.1e-3
Large Models
Llama-3.1-405B6.9e-41.4e-31.0e-33.0e-32.4e-33.7e-3
Claude Sonnet-3.52.1e-33.7e-33.0e-36.9e-35.9e-37.5e-3
GPT-4o2.3e-34.5e-32.7e-30.018.7e-30.01
Reasoning Models
OpenAI o1-mini5.4e-38.4e-37.6e-30.020.020.07
OpenAI o10.020.030.040.250.130.52
DeepSeek-R11.8e-35.1e-34.6e-30.040.010.04
OpenAI o3-mini1.1e-32.1e-32.6e-30.015.4e-30.02
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.878, + 0.885, + 0.894 + ], + "angle": 0, + "content": "Table 6: Dollar cost incurred per model per dataset: \\( {C}_{m}\\left( {p \\sim D}\\right) \\) . In each column,the 3 entries with the lowest cost have blue highlights." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.089, + 0.34, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.089, + 0.611, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.089, + 0.878, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.287, + 0.342, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.287, + 0.611, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.637, + 0.287, + 0.877, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.13, + 0.489, + 0.842, + 0.512 + ], + "angle": 0, + "content": "Figure 4: Bar plot showing the percentage of change in frontier cost-of-pass per model release (i.e. \\(\\frac{G_{p\\sim D}(\\{m_t\\},\\mathcal{M}_{t-1})}{V_{p\\sim D}(\\mathcal{M}_{t-1})}\\))" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.519, + 0.477, + 0.7 + ], + "angle": 0, + "content": "formance, the use of accuracy as a binary success-or-failure metric presupposes the existence of a reliable verification pipeline and a practical decision mechanism, potentially oversimplifying scenarios where these assumptions do not hold. 
Additionally, our cost-of-pass metric, which combines cost and performance, currently does not account for variance information, limiting its practical interpretability in situations where two scenarios with similar cost-of-pass values exhibit substantially different variances. Furthermore, from a practical standpoint, cost modeling could consider alternative units (e.g., latency, inference time, FLOPs), which are currently not analyzed." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.707, + 0.478, + 0.904 + ], + "angle": 0, + "content": "Nevertheless, a significant strength of our framework is its abstract and modular design, facilitating extensions to address these limitations. Future work can enhance the precision of cost computations by integrating additional cost factors, such as verification overheads or indirect costs. Moreover, the framework could be adapted to alternative resource-consumption metrics like latency, inference time, or FLOPs. Regarding performance evaluation, the binary accuracy metric could be replaced or supplemented with alternative success measures tailored to specific scenarios, especially those emphasizing a particular balance between performance and cost. Incorporating variance and other statistical information into cost and performance calculations" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.519, + 0.88, + 0.535 + ], + "angle": 0, + "content": "could also enhance practical usability and interpretability." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.541, + 0.889, + 0.707 + ], + "angle": 0, + "content": "An additional limitation lies in the evaluation methodology, particularly regarding human expert cost estimation. Our framework assumes that experts can reliably solve tasks given sufficient conditions (e.g., adequate qualifications, time, compensation). However, this assumption may not hold for particularly challenging problems or datasets with inherently high uncertainty in achieving correct solutions. 
Future research could address this limitation by conducting rigorous human subject studies to empirically evaluate and incorporate expert performance variability into the cost estimation process." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.255, + 0.057, + 0.719, + 0.071 + ], + "angle": 0, + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.296, + 0.877, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.683, + 0.888, + 0.71 + ], + "angle": 0, + "content": "Figure 5: The relative improvement \\((\\%)\\) in frontier cost-of-pass under a counterfactual setting, removing a model \\(m_*\\) from the model set \\(\\mathcal{M}_T\\). High values mean that the model is essential for maintaining the current frontier." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.497, + 0.936 + ], + "angle": 0, + "content": "16" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_origin.pdf b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b7883e181abb5eeee5440b08bc1cd6aadd57e827 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/95bbe2fb-2fa1-456f-8a44-9a7c70d550cc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d386c14b4ef71a5b98e0cb6dcb9a58acfe83febca76c29ab9a029e042686db20 +size 1598438 diff --git a/data/2025/2504_13xxx/2504.13359/full.md b/data/2025/2504_13xxx/2504.13359/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a4c65c37a9ec55075151566ebd08ed9cf36edad6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/full.md @@ -0,0 +1,474 @@ +# Cost-of-Pass: An Economic Framework 
for Evaluating Language Models + +Mehmet Hamza Erol* 1 Batu El* 1 Mirac Suzgun* 1 Mert Yuksekgonul† 1 James Zou† 1 + +# Abstract + +The widespread adoption of AI systems in the economy hinges on their ability to generate economic value that outweighs their inference costs. Evaluating this tradeoff requires metrics that account for both performance and costs. We propose a framework grounded in production theory for evaluating language models by combining accuracy and inference cost. We introduce cost-of-pass, the expected monetary cost of generating a correct solution. We then define the frontier cost-of-pass as the minimum cost-of-pass achievable across available models or the human-expert, using the approximate cost of hiring an expert. Our analysis reveals distinct economic insights. First, lightweight models are most cost-effective for basic quantitative tasks, large models for knowledge-intensive ones, and reasoning models for complex quantitative problems, despite higher per-token costs. Second, tracking this frontier cost-of-pass over the past year reveals significant progress, particularly for complex quantitative tasks where the cost has roughly halved every few months. Third, to trace key innovations driving this progress, we examine counterfactual frontiers—estimates of cost-efficiency without specific model classes. We find that innovations in lightweight, large, and reasoning models have been essential for pushing the frontier in basic quantitative, knowledge-intensive, and complex quantitative tasks, respectively. Finally, we assess the cost-reductions afforded by common inference-time techniques like majority voting and self-refinement, finding that their marginal accuracy gains rarely justify their costs. Our findings underscore that complementary model-level innovations are the primary drivers of cost-efficiency, and our economic framework provides a principled tool for measuring this progress and guiding deployment. + +# 1. 
Introduction + +The recent progress in generative AI, particularly language models (LMs), has sparked significant interest in their potential to transform industries, automate cognitive tasks, and reshape economic productivity (Brynolfsson et al., 2025; Eloundou et al., 2024; Acemoglu, 2024). The widespread adoption of these AI systems in the economy hinges on whether the economic benefits generated by the tasks they can perform outweigh the associated inference costs, and whether those inference costs are lower than the cost of equivalent human labor. Consequently, two priorities have emerged at the forefront of LM research: advancing capabilities and reducing costs. These goals, however, often involve trade-offs with more powerful models or test-time techniques that offer higher accuracy at the expense of greater computational and monetary cost (Chen et al., 2024; Parashar et al., 2025; Madaan et al., 2023; Wang et al., 2023; Kapoor et al., 2024). While standard metrics capture accuracy or other system capabilities, they fail to account for cost, leading to an incomplete picture of progress. Ultimately, what matters to the users is not just raw capability, but the value delivered relative to cost and the standard has been to interpret and report these separately. As the ecosystem of models grows, it is essential to assess new models not in isolation, but in the context of a broader ecosystem, where marginal improvements may or may not justify higher costs, and do so in an easy-to-interpret manner. + +To systematically investigate the trade-off between cost and performance and analyze the LM ecosystem as a whole, we draw insights from a well-established and foundational framework from economics: production frontiers. Economists have long studied these frontiers, which map a set of inputs to the maximum output attainable under a given technology (Farrell, 1957). 
In Farrell's original formulation, a producer is technically efficient if no input can be reduced without lowering output, and price efficient if the input mix minimizes cost given input prices. Together, these conditions yield the lowest possible cost per unit of output. Extending this framework, Aigner et al. (1977) introduced stochastic frontier production functions, in which the relationship between inputs and output is modeled as stochastic rather than deterministic, practically accounting for potential defective outputs that do not pass evaluation criteria due to factors beyond the producer's control. + +# Concepts + +Cost-of-Pass: Expected cost of producing a correct output. + +$$ +v (m, p) = \frac {\mathbb {E} [ \operatorname {c o s t} _ {m} (p) ]}{\mathbb {E} [ \operatorname {a c c u r a c y} _ {m} (p) ]} = \frac {C _ {m} (p)}{R _ {m} (p)} +$$ + +Human Expert Baseline Cost: Cost of hiring a human expert to produce a correct output. + +$$ +v (\mathrm {e x p e r t}, p) \approx C _ {\mathrm {e x p e r t}} (p) +$$ + +Frontier Cost-of-Pass: Lowest cost-of-pass given available set of LMs & human expert baseline. + +$$ +\min \left(V _ {p} (\mathcal {M}), v (\text {e x p e r t}, p)\right) +$$ + +$$ +V _ {p} (\mathcal {M}) = \min _ {m \in \mathcal {M}} v (m, p) \longrightarrow \text {B e s t L M C o s t - o f - P a s s} +$$ + +![](images/8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg) +(A) Frontier Cost-of-pass & Human Expert Baseline + +![](images/869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg) +(B) Progress as Frontier Cost-of-Pass over Time + +![](images/f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg) +(C) Essentialness of Model Families to Task Categories + +![](images/fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg) +(D) Cost Reductions with Inference Time Techniques +Figure 1: Highlights of the cost-of-pass framework and empirical analyses. 
Core concepts (left) set foundations for: (A) Comparing the Human Expert Baseline to the frontier achieved by the single most effective LM per task category. (B) Tracking the reduction in frontier cost-of-pass over time, indicating progress driven by new model releases (color-coded by family). (C) Quantifying the essential contribution of each model family: lightweight (less than $1 per million tokens), large, and reasoning; to the current cost-efficiency frontier, measured by the percentage of each family's contribution. (D) Assessing the economic benefit (relative cost reduction) achieved by applying common inference-time techniques over the baseline model frontier (which rarely results in meaningful gains). + +These economic concepts are highly relevant to modern LMs, which inherently function as stochastic producers: for a given input, they yield a desired output (e.g., a correct solution) stochastically (Brown et al., 2024). Common practices such as employing scaffolds or more computationally intensive inference techniques (Snell et al., 2024; Madaan et al., 2023; Wang et al., 2023) represent efforts to manipulate this production process. These strategies seek to increase the probability of success but typically do so at the expense of higher computational cost, directly mirroring the economic trade-offs inherent in production efficiency. Motivated by these parallels and the economic goal of minimizing cost per successful output under uncertainty, we develop a quantitative framework tailored to LMs. + +We summarize our contributions as follows. + +Concepts. We introduce cost-of-pass (§2.2), which quantifies the expected monetary cost to achieve a successful output for a given problem. Building on this concept and incorporating a human-expert cost baseline, we define the frontier cost-of-pass as the minimum achievable cost-of-pass across all available options (LMs and human-expert) for that problem. 
We show these reveal distinct economic niches for model families (e.g., lightweight vs. reasoning models) on different tasks, which accuracy comparisons alone obscure (§3.2). + +Tracking progress with frontier cost-of-pass. Using the + +cost-of-pass and frontier cost-of-pass, we analyze economic improvements across three task categories from May 2024 to February 2025. We observe an exponential decrease in frontier cost-of-pass across all tasks, though the trends vary. Notably, we observe that, over the past year, the expected cost of generating a correct solution to complex quantitative problems has been cut in half every few months. We find that the frontier cost-of-pass is driven primarily by lightweight models and reasoning models (§3.3). + +Counterfactual frontier in the absence of model families. We show that our analysis reveals the complementary roles of different model types in driving recent progress. Innovations in lightweight models have been instrumental in reducing costs on basic quantitative tasks. Large models, by contrast, have been most impactful for knowledge-based benchmarks like GPQA Diamond (Rein et al., 2024). Meanwhile, reasoning models have been central to advances on complex quantitative reasoning challenges such as AIME (MAA, 2024) and MATH (Hendrycks et al., 2021) ( $\S$ 3.4). + +Impact of post-hoc inference time techniques. We observe that common test-time techniques such as self-refinement (Madaan et al., 2023) and majority voting (self-consistency; Wang et al., 2022) to improve performance offer either limited or no economic benefits, indicating that the recent reductions in frontier cost-of-pass have been mostly driven by model-level innovations (§ 3.5). + +# 2. Setup + +# 2.1. Economic Theory of Production Efficiency + +Classical production theory examines how producers convert inputs into outputs efficiently. 
Given a set of producers $\mathcal{F} = \{f_0, \dots, f_{n-1}\}$ , we are often interested in the maximum output attainable for a given combination of inputs. If producing $u \in \mathbb{R}_{>0}$ units of output requires an input vector $\mathbf{x} \in \mathbb{R}_{\geq 0}^k$ (e.g., quantities of different resources), the input requirement set $P_u$ contains all input vectors capable of producing at least $u$ units: + +$$ +P _ {u} = \left\{\mathbf {x} \mid \max _ {f _ {i} \in \mathcal {F}} f _ {i} (\mathbf {x}) \geq u \right\}. \tag {1} +$$ + +Based on this input requirement and a vector $\mathbf{w_i} \in \mathbb{R}_{\geq 0}^k$ being the prices of the inputs (incurred by each producer $i$ ), the frontier cost for producing $u$ units of output is the minimum cost required: + +$$ +V _ {u} = \min _ {\mathbf {x} \in P _ {u}, f _ {i} \in \mathcal {F}} \mathbf {w} _ {\mathbf {i}} ^ {T} \mathbf {x}, \tag {2} +$$ + +subject to $f_{i}(\mathbf{x}) \geq u$ implicitly included in $\mathbf{x} \in P_u$ . This $V_{u}$ quantifies the lowest possible cost to achieve output $u$ given the available production technologies $(\mathcal{F})$ and input prices $(\mathbf{w_i})$ . Farrell (1957) used these core concepts to build definitions for technical and price efficiency in a production ecosystem for producers. Critically, Aigner et al. (1977) extended this framework to handle stochastic production functions, where output is probabilistic for a given input. + +Building on this economic foundation, we adapt the core concept of a frontier cost $(V_{u})$ to represent the minimum achievable cost for obtaining a correct solution using LMs. Recognizing that a key aspect of LM behavior is its inherent stochasticity, an issue long addressed in economic production theory (Aigner et al., 1977), we incorporate this variability into our cost-efficiency metric. This enables us to align our framework with core production concepts and assess the economic impact of stochastic LM producers. + +# 2.2. 
Cost-of-Pass: An Efficiency Metric for LMs + +Here we instantiate the economic framework for language models (LMs). Consider a specific problem $p$ , where the unit of production is a correct solution. We define a model $m$ as an inference pipeline using an LM, acting as a stochastic producer. Two quantities characterize its efficiency on problem $p$ : + +$R_{m}(p) = \mathrm{Prob.}$ of $m$ producing a correct answer on $p$ + +$C_m(p) = \text{Expected cost of one inference attempt by } m \text{ on } p$ . + +In the context of LMs, the inputs $\mathbf{x}$ correspond to resources like prompt and generated tokens, while the input prices $\mathbf{w}$ represent the costs per token charged by the provider. The + +total cost of these inputs for a single inference attempt by model $m$ on problem $p$ is captured by $C_m(p)$ , effectively instantiating the term $\mathbf{w}^T\mathbf{x}$ from the theory in the previous section. + +Since the model output is stochastic, the expected number of attempts to obtain the first correct solution is $1 / R_{m}(p)$ , assuming independent trials. This yields the cost-of-pass, defined as the expected monetary cost to obtain one correct solution for problem $p$ : + +$$ +v (m, p) = \frac {C _ {m} (p)}{R _ {m} (p)}. \tag {3} +$$ + +The cost-of-pass integrates both performance $(R_{m}(p))$ and cost $(C_m(p))$ into a single economically interpretable metric: it quantifies how efficiently financial resources are converted into correct outputs. This formulation mirrors classical production theory, where the goal is to assess the cost of achieving a specific target output (Farrell, 1957); in our case, the target is a correct solution. When a model cannot produce one $(R_{m}(p) = 0)$ , the cost-of-pass becomes infinite, appropriately signaling infeasibility. + +# 2.3. 
The LM Frontier Cost-of-Pass + +While cost-of-pass (§ 2.2) evaluates a single model's efficiency, understanding the overall state of LM capabilities for a given problem requires assessing the collective performance of the entire available LM ecosystem. Therefore, analogous to the frontier cost $V_{u}$ (Eq. 2), we define the $LM$ frontier cost-of-pass for problem $p$ as the minimum cost-of-pass achievable using any available LM strategy $m$ from the set $\mathcal{M}$ : + +$$ +V _ {p} (\mathcal {M}) = \min _ {m \in \mathcal {M}} v (m, p). \tag {4} +$$ + +$V_{p}(\mathcal{M})$ quantifies the minimum expected cost to solve problem $p$ using the most cost-effective model currently available within the set $\mathcal{M}$ . If no LM in $\mathcal{M}$ can solve $p$ (i.e., $R_{m}(p) = 0$ for all $m\in \mathcal{M}$ ), then $V_{p}(\mathcal{M}) = \infty$ . + +# 2.4. Grounding Evaluation: Estimated Human-Expert Baseline + +The LM frontier cost-of-pass $V_{p}(\mathcal{M})$ reveals the best LM performance but lacks context: it does not show if LMs are economically advantageous over human labor. Moreover, the LM frontier cost-of-pass can be infinite if no LM succeeds. To address both, we introduce human-expert baseline as a reference point, by considering a human-expert annotator as a specific strategy: $m_{\mathrm{expert}}$ . Let $\mathcal{M}_0 = \{m_{\mathrm{expert}}\}$ represent this baseline set. We assume experts typically achieve near-perfect correctness $(R_{\mathrm{expert}}(p) \approx 1)$ for tasks they are qualified for. Thus, the cost-of-pass for a qualified + +expert is approximately their labor cost per problem: + +$$ +v (\text {e x p e r t}, p) \approx C _ {\text {e x p e r t}} (p). \tag {5} +$$ + +The estimation of $C_{\mathrm{expert}}(p)$ involves considering required expertise, time per problem, and appropriate compensation rates (detailed in § 2.6.1). 
By incorporating this baseline, we define the frontier cost-of-pass for problem $p$ , considering both LMs $(\mathcal{M})$ and the human-expert alternative $(\mathcal{M}_0)$ : + +$$ +V _ {p} (\mathcal {M} \cup \mathcal {M} _ {0}) = \min \left(V _ {p} (\mathcal {M}), v (\text {e x p e r t}, p)\right). \tag {6} +$$ + +This frontier cost-of-pass represents the true minimum expected cost to obtain a correct solution for problem $p$ using the best available option, whether it's an LM or a human. Crucially, $V_{p}(\mathcal{M} \cup \mathcal{M}_{0})$ is always finite (assuming finite human-expert cost and capability). + +# 2.5. Measuring Progress and Value Gain + +To track improvements against the best available option over time, let $\mathcal{M}_t$ denote the total set of available strategies at time $t$ , encompassing both the set of LM strategies released up to time $t$ and the human-expert baseline $\mathcal{M}_0$ , that is, $\mathcal{M}_t = \{m_{\leq t}\} \cup \mathcal{M}_0$ . The frontier cost-of-pass achievable at time $t$ can be calculated as: + +$$ +V _ {p} \left(\mathcal {M} _ {t}\right) = \min _ {m \in \mathcal {M} _ {t}} v (m, p). \tag {7} +$$ + +As new LM models $\{m_t\}$ are released, the set expands such that $\mathcal{M}_t = \mathcal{M}_{t - 1} \cup \{m_t\}$ . Consequently, the frontier cost-of-pass $V_{p}(\mathcal{M}_{t})$ forms a non-increasing sequence over time $t$ , tracking the reduction in the minimum cost needed to solve a particular problem $p$ . + +To quantify the economic impact of new developments, we define the gain. When a new set of models $\{m_t\}$ becomes available at time $t$ (often a single model), the gain for problem $p$ is the reduction it causes in the frontier cost-of-pass: + +$$ +G _ {p} \left(\left\{m _ {t} \right\}, \mathcal {M} _ {t - 1}\right) = V _ {p} \left(\mathcal {M} _ {t - 1}\right) - V _ {p} \left(\mathcal {M} _ {t - 1} \cup \left\{m _ {t} \right\}\right). 
\tag {8} +$$ + +Note that $G_{p}$ measures how much cheaper the new model(s), $\{m_t\}$ , make solving $p$ compared to prior best options, including humans. Hence, a large $G_{p}$ value indicates a significant economic contribution in solving $p$ . This notion underlies our experiments, analyzing the value generated by models relative to the human baseline and tracking the evolution of the overall frontier. + +Extending to a distribution. Although measuring frontier cost-of-pass and value gain for individual problems can be informative, particularly through a fine-grained perspective, we often care about more than a single instance. Let $P \sim D$ be a set of problems sampled from a problem distribution $D$ . + +We can then extend our definitions for such a distribution through the following: + +$$ +V _ {p \sim D} (\mathcal {M} _ {t}) = \mathbb {E} _ {p \sim D} [ V _ {p} (\mathcal {M} _ {t}) ], \tag {9} +$$ + +$$ +G _ {p \sim D} (\{m _ {t} \}, \mathcal {M} _ {t - 1}) = \mathbb {E} _ {p \sim D} [ G _ {p} (\{m _ {t} \}, \mathcal {M} _ {t - 1}) ]. \tag {10} +$$ + +# 2.6. Estimating the Economic Efficiency + +To operationalize our overall framework for any given distribution of problems, we introduce the following recipe: + +(1) Estimate success rates. For each model-problem pair $(m,p)$ , generate a number of independent attempts to approximate $R_{m}(p)$ . We use the same prompt and model settings across these attempts, varying only factors necessary to ensure independence (e.g., internal sampling randomness). +(2) Estimate per-attempt cost. Track the average number of tokens (prompt + generation) consumed per attempt, multiply by the current token price (which can differ by model provider or usage level), and add any extra charges (e.g., third-party API calls, external reasoning modules, etc.). This sum yields $C_m(p)$ . +(3) Compute cost-of-pass. For each model $m$ , calculate $v(m, p) = C_m(p) / R_m(p)$ . ( $R_m(p) = 0$ yields $v(m, p) = \infty$ .) 
+(4) Determine frontier cost-of-pass. Estimate human-expert cost $v(\text{expert}, p)$ (see below). Find $V_{p}(\mathcal{M} \cup \mathcal{M}_{0})$ for a given set of strategies $\mathcal{M}$ . +(5) Analyze over benchmarks. Aggregate $V_{p}(\mathcal{M})$ across problems $p \sim D$ to get $V_{p \sim D}(\mathcal{M}_t)$ . Track progress over time (for $\mathcal{M}_t$ ) and compute gain $G_{p \sim D}$ for new models. + +# 2.6.1. Estimating Human-Expert Cost + +To estimate $v(\text{expert}, p)$ , the plausible cost of obtaining a correct human-expert answer, we systematically determine the required qualifications, appropriate hourly compensation, and average time for a typical problem $p$ per dataset. We determine these quantities based on a hierarchy of evidence by prioritizing the dataset's creation process or associated studies (e.g., reported annotation pay/time (Parrish et al., 2022)). When direct data is absent, we leverage findings from closely related work (Zhang et al., 2024) or infer parameters from the dataset's context (e.g., deriving time-per-problem from contest rules (Art of Problem Solving, 2023)). Compensation rates are informed by reported study payments (Rein, 2024) or relevant market rates for comparable expertise (e.g., specialized tutoring rates (TutorCruncher, 2025; Wyzant Tutoring, 2025)).1 + +
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.8e-50.192.7e-218.583.3815.33
GPT-4o mini5.4e-50.221.3e-225.382.0614.67
Llama-3.3-70B1.6e-40.167.4e-318.581.3110.67
Large Models
Llama-3.1-405B6.9e-40.146.7e-310.431.138.67
Claude Sonnet-3.52.1e-30.196.4e-314.062.5414.67
GPT-4o2.3e-30.176.2e-314.070.9614.01
Reasoning Models
OpenAI o1-mini5.4e-30.171.3e-212.270.504.80
OpenAI o11.9e-20.224.3e-28.070.902.85
DeepSeek-R11.8e-30.171.5e-214.570.213.41
OpenAI o3-mini1.1e-30.111.1e-28.180.762.03
+ +Table 1: Frontier dollar cost-of-pass per model / dataset. Each entry is the expected dollar cost of a problem $p \sim D$ with the presence of the model $m$ and a human expert: $V_{p \sim D}(\{m\} \cup \mathcal{M}_0)$ . Per column, the 3 entries with the lowest value (i.e. best frontier cost-of-pass) have blue highlights. Different model families emerge as cost-effective at different task categories, highlighting the strengths of our evaluation. + +# 3. Experiments + +# 3.1. Models and Datasets + +Models. We consider three categories of models: + +(1) Lightweight models: We use the per-token cost as a proxy and select models with a cost less than $1 per million input and output tokens (see Table 4): Llama-3.1-8B (Grattafori et al., 2024), GPT-4o mini (OpenAI, 2024), and Llama-3.3-70B (Meta-AI, 2024). +(2) Large models: We select large general-purpose LMs: Llama-3.1-405B (Grattafiori et al., 2024), Claude Sonnet-3.5 (Anthropic, 2024), and GPT-4o (Hurst et al., 2024). +(3) Reasoning models: We select models with special reasoning post-training, including OpenAI's o1-mini (OpenAI et al., 2024), o1 (OpenAI et al., 2024), and o3-mini (OpenAI, 2025), as well as DeepSeek R1 (Guo et al., 2025). + +Within each category, we select three to four representative models released between the second half of 2024 and early 2025. To preserve the integrity of our temporal analysis, we prioritize the earliest stable releases and exclude research previews or experimental versions. + +Datasets. We evaluate models across three sets of tasks: + +(1) Basic quantitative tasks: These involve basic numerical reasoning. We include an arithmetic dataset (Two Digit Addition) to assess basic numerical computation, and GSM8K (Cobbe et al., 2021) to evaluate multi-step grade-school level problem solving. +(2) Knowledge-based tasks: These require recalling and reasoning over factual knowledge. 
We include a scientific knowledge-intensive question answering task (GPQA-Diamond (Rein et al., 2024)) to evaluate models' abl + +ity to recall and utilize complex scientific facts, and a bias benchmark (BBQ (Parrish et al., 2022)) to evaluate whether models rely on stereotypical knowledge or can disambiguate factual responses from biased defaults. +(3) Complex quantitative reasoning tasks: These require complex mathematical reasoning and problem solving. We use MATH-500 (Hendrycks et al., 2021; Lightman et al., 2023) to assess models on competition-level maths problems, and AIME24 (MAA, 2024) to evaluate performance on challenging problems from the 2024 American Invitational Mathematics Examination. + +# 3.2. Frontier Cost-of-Pass with a Single Model + +In this experiment, we aim to quantify the economic value each model $m$ generates on different distributions of problems $p \sim D$ . For this, we take human-expert as a baseline and quantify the frontier cost-of-pass of a problem in the presence of the model $m$ : $V_{p \sim D}(\{m\} \cup \mathcal{M}_0)$ . + +The results in Table 1, highlighting the top three costs, show that our frontier cost-of-pass effectively captures how different model families offer economic advantages across various task categories. We find that lightweight models yield the lowest frontier cost-of-pass on basic quantitative tasks, such as Two Digit Addition. This is expected, as all model families achieve high accuracy on this dataset, making the least expensive models the most cost-effective. In contrast, for knowledge-based tasks, larger models achieve a lower frontier cost-of-pass compared to lightweight ones. While the reasoning models, such as o1, are priced significantly more expensively compared to both large and lightweight models, they lead to significant performance improvements, which, overall, result in reductions in the cost-of-pass mainly in complex quantitative tasks. 
+ +![](images/50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg) +Two Digit Addition + +![](images/13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg) +BBQ + +![](images/482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg) +MATH500 + +![](images/326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg) +GSM8K +Figure 2: The frontier dollar cost-of-pass (i.e. $V_{p\sim D}(\mathcal{M}_t)$ steadily decreases with new model releases, spanning models released between May 2024 and February 2025. Y-axes are normalized (divided by $V_{p\sim D}(\mathcal{M}_0)$ , shown in percentage (%)). + +![](images/832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg) +GPQA Diamond + +![](images/3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg) +AIME 2024 + +In contrast, when either task performance $(R_{m}(p\sim D))$ or cost $(C_m(p\sim D)$ is solely taken into account (Tables 5 and 6) such metrics tend to favor either reasoning models or lightweight models respectively due to their significant edge per criteria, without assessing the nuances in the economic impact they induce. This effectively highlights the sophistication of our metric and evaluation framework. + +# 3.3. Tracking Frontier Cost-of-Pass with New Releases + +In this experiment, we track the improvements on the frontier cost-of-pass for a problem. Figure 2 shows the trends of the cumulative gain per dataset $(V_{p\sim D}(\mathcal{M}_t))$ , each updated by the corresponding model release $(\mathcal{M}_{t - 1}\cup \{m_t\})$ . We observe a steady decline in the frontier cost-of-pass for complex quantitative tasks. In contrast, knowledge-based and basic quantitative tasks typically exhibit a sharp initial drop in frontier cost-of-pass with the early releases of models, followed by a plateau. 
To quantify the cost reduction trends, we empirically fit an exponential decay curve of the form: + +$$ +V _ {p} \left(M _ {t}\right) \approx a e ^ {- b t} + c, \tag {11} +$$ + +where $t$ denotes time in months since the first model release, and $a$ , $b$ , and $c$ are fit parameters. From this, we compute the time for the exponential component of the cost to drop by $50\%$ : $T_{1/2} = \ln(2)/b$ . Using this formulation, we find that for complex quantitative tasks, between May 2024 and February 2025, the frontier cost-of-pass for MATH500 halved approximately every 2.6 months, whereas for AIME + +2024, the halving time was 7.1 months—indicating consistent cost reductions over the past year. + +# 3.4. Essentialness of Model Families: Counterfactual Frontier Cost-of-Pass + +Section 3.3 showed the frontier cost-of-pass decreasing over time with new model releases. To understand which model families were most critical to this progress, we conduct a counterfactual analysis that quantifies the impact of removing each family. Defining $\mathcal{M}_g$ as a family of models (lightweight, large, or reasoning), we measure the counterfactual contribution of family $g$ on dataset $D$ by calculating the relative improvement in frontier cost-of-pass attributable to its inclusion: + +$$ +\frac {G _ {p \sim D} \left(\mathcal {M} _ {g} , \mathcal {M} _ {T} \backslash \mathcal {M} _ {g}\right)}{V _ {p \sim D} \left(\mathcal {M} _ {T}\right)}. \tag {12} +$$ + +Here, $\mathcal{M}_T$ includes all models used in our experiments. This metric represents the relative improvement in the final frontier cost-of-pass $V_{p\sim D}(\mathcal{M}_T)$ attributable to the model family $\mathcal{M}_g$ , with higher values indicating greater essentialness of that family for achieving the current frontier. + +Figure 3 illustrates our main findings, revealing distinct roles across model families. 
Lightweight models help reduce the frontier cost-of-pass on basic quantitative tasks, while large models drive performance on knowledge-intensive tasks. Reasoning models play a key role in advancing the frontier for complex quantitative reasoning and also improve + +![](images/9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg) +Figure 3: The relative improvement $(\%)$ in frontier cost-of-pass attributable to each model family $g$ , calculated under a counterfactual setting where $\mathcal{M}_g$ is removed. Higher values signify greater essentialness for maintaining the current frontier. + +
Inference Time TechniqueBasic QuantitativeKnowledge BasedComplex Quantitative
Two Digit AdditionGSM8KBBQGPQA DiamondMATH500AIME24
Self-Refine006.724.900
Maj. Vote (k=3)000000
Maj. Vote (k=4)000000
+ +Table 2: Relative performance gains (%) from different inference time techniques across datasets. + +performance on GPQA-Diamond, as well as GSM8K, which benefits from small reasoning models like o3-mini. + +These findings highlight that progress on different task types is driven by different model paradigms. While large models have brought clear gains on knowledge-intensive tasks (e.g., GPQA), recent improvements in cost-efficiency—especially in more quantitative domains—appear largely driven by advances in lightweight and reasoning models. Together, these results suggest that the current cost-efficiency frontier, as reflected in our framework, is shaped mainly by (i) lightweight models and (ii) reasoning models. + +# 3.5. Impact of Inference Time Techniques on Frontier Cost-of-Pass + +We now assess whether common inference-time techniques provide meaningful economic benefits. Specifically, we ask: is it cost-effective to improve model performance through these techniques, compared to relying on the models' baseline performance? To explore this, we focus on the set of lightweight and large models, denoted by $\mathcal{M}_L$ . + +First, we determine the frontier cost-of-pass achieved by $\mathcal{M}_L$ without any modifications. We then apply a given inference-time technique uniformly across all models in $\mathcal{M}_L$ , yielding a modified set $\mathcal{M}_L^*$ . The gain from this technique, measured relative to the original frontier cost-of-pass, can be computed as follows: + +$$ +\frac {G _ {p \sim D} \left(\mathcal {M} _ {L} ^ {*} , \mathcal {M} _ {L}\right)}{V _ {p \sim D} \left(\mathcal {M} _ {L}\right)}. \tag {13} +$$ + +In this study, we consider two popular techniques: self- + +refinement (Madaan et al., 2023) and majority voting (a.k.a. self-consistency; Wang et al., 2023), with 3 and 4 votes. + +As shown in Table 2, self-refinement shows moderate economic benefit on knowledge-intensive tasks, with a notable $24.9\%$ improvement on GPQA Diamond. 
In contrast, majority voting—despite potentially enhancing raw accuracy—does not offer relative economic improvement across the tested models and datasets. + +Collectively, these findings suggest, at least for the evaluated techniques, that the increased computational costs generally outweigh the performance benefits relative to the frontier cost-of-pass established by the baseline models. This implies that these common inference-time approaches may not be sufficient on their own to yield significant economic benefits within our evaluation framework for now. + +# 4. Related Works + +Economic perspectives and broader impacts. The efficiency of LMs carries significant economic implications, as they are viewed as general-purpose technologies impacting productivity and labor (Eloundou et al., 2024; Brynjolfsson et al., 2025). Complementary economic analyses explore provider strategies regarding pricing and product design Bergemann et al. (2025), and user-side decision-making involving ROI, token costs, and success probabilities. + +Our cost-of-pass metric serves as a crucial bridge between these technical realities of model performance and their economic consequences. By providing a fundamental measure, the expected monetary cost to successfully complete + +a task, it allows for quantifying the economic contribution of specific AI systems and informs rational model selection for achieving economic viability, and provides quantitative perspective on the economic evolution of the LM ecosystem. + +LM resource consumption, efficiency optimization and benchmarking. Research increasingly recognizes the importance of LM resource consumption and efficiency. Studies have quantified operational costs like tokens (Chen et al., 2023) and energy (Maliakel et al., 2025), revealing task-dependent performance and potential diminishing returns from high expenditure (Miserendino et al., 2025). 
This focus has intensified with the rise of reasoning methodologies (Sui et al., 2025) and inference-time techniques (e.g., Madaan et al. (2023); Wang et al. (2023)), which often trade increased computational cost for potential accuracy gains. + +Concerns like "overthinking," where lengthy processing fails to improve results (Chen et al., 2024; Cuadron et al., 2025), have spurred efforts to optimize resource use through methods like dynamic token budgeting (Han et al., 2025), specialized training (Arora & Zanette, 2025), prompt engineering (Xu et al., 2025; Aytes et al., 2025) or researching optimal reasoning lengths (Wu et al., 2025; Yang et al., 2025). Concurrently, evaluation methodologies have evolved beyond pure accuracy or correctness measures. + +Recognizing its insufficiency, researchers have incorporated cost via fixed budgets (Wang et al., 2024), performance heuristics (McDonald et al., 2024), or non-monetary metrics like conciseness (Nayab et al., 2024). Kapoor et al. (2024) strongly advocated for using real dollar costs and accounting for stochasticity—factors central to our approach. Benchmarking efforts have also highlighted diminishing returns from simply scaling inference computation (Parashar et al., 2025). While these works underscore the need for cost-aware analysis, they often rely on specific constraints (e.g., fixed budgets) or heuristic metrics. + +Our cost-of-pass framework seeks to advance this by providing a single, interpretable metric grounded in economic production principles, offering a unified way to assess the economic viability of different models and techniques without predefined budget assumptions or proxy metrics. + +# 5. Conclusion + +We introduced an economic framework designed to evaluate language models by integrating their performance with inference cost. 
Drawing from production theory, we conceptualize language models as stochastic producers, and assess their efficiency using our proposed cost-of-pass metric, which measures the expected cost per correct solution. Our analysis utilizes this metric alongside the frontier cost-of-pass, defined as the minimum achievable cost compared to an human expert baseline. This approach reveals distinct + +economic roles played by different model classes. For instance, retrospective and counterfactual evaluations demonstrate that lightweight models primarily drive efficiency on basic tasks, whereas reasoning models are essential for complex problem-solving. Critically, our findings show that common inference-time techniques typically increase the cost-of-pass, thus failing to provide net economic benefits when compared to the progress made by improving the underlying models themselves. In conclusion, our framework offers a principled foundation for measuring language model innovation in economic terms. It serves as a valuable tool for guiding model selection and aligning AI development with real-world value. + +# Acknowledgments + +We thank Federico Bianchi, Dan Jurafsky, Daniel E. Ho, Can Yesildere, and Semyon Lomasov for valuable comments and discussions in the early stages of this project. MHE gratefully acknowledges support from the Fulbright Foreign Student Program. BE gratefully acknowledges the support of the Stanford Knight-Hennessy Scholarship. MS gratefully acknowledges the support of an HAI-SAP Fellowship. + +# References + +1st grade 4th quarter expectations – fast facts timed tests. Elementary School Curriculum Note (online PDF), 2021. States 20–25 addition problems should be solved in 1 minute (2–3 sec each) (Fas, 2021). +Daron Acemoglu. The Simple Macroeconomics of AI. NBER Working Papers 32487, National Bureau of Economic Research, Inc, May 2024. URL https://ideas.repec.org/p/nbr/nberwo/32487.html. +Dennis Aigner, C.A.Knox Lovell, and Peter Schmidt. 
Formulation and estimation of stochastic frontier production function models. Journal of Econometrics, 6(1):21-37, 1977. ISSN 0304-4076. doi: https://doi.org/10.1016/0304-4076(77)90052-5. URL https://www.sciencedirect.com/science/article/pii/0304407677900525. +Anthropic. Claude 3.5 sonnet announcement, 2024. URL https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 13 Feb. 2025. +Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025. +Art of Problem Solving. American Invitational Mathematics Examination (AIME) Format. AoPS Wiki (aops.com), 2023. States AIME is 15 questions in 3 hours (12 min + +per problem) (Art of Problem Solving, 2023). Accessed Mar 25, 2025. +Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179, 2025. +Dirk Bergemann, Alessandro Bonatti, and Alex Smolin. The economics of large language models: Token allocation, fine-tuning, and optimal pricing. arXiv preprint arXiv:2502.07736, 2025. +Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787. +Erik Brynjolfsson, Danielle Li, and Lindsey Raymond. *Generative ai at work.* The *Quarterly Journal of Economics*, pp. qjae044, 2025. +Lingjiao Chen, Matei Zaharia, and James Zou. Frugalgpt: How to use large language models while reducing cost and improving performance. arXiv preprint arXiv:2305.05176, 2023. +Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2+$ $3=$ ? on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024. 
+Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025. +Tyna Eloundou, Sam Manning, Pamela Mishkin, and Daniel Rock. Gpts are gpts: Labor market impact potential of llms. Science, 384(6702):1306-1308, 2024. +Michael James Farrell. The measurement of productive efficiency. Journal of the royal statistical society: series A (General), 120(3):253-281, 1957. +Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, + +et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. Token-budget-aware llm reasoning, 2025. URL https://arxiv.org/abs/2412.18547. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe. +Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihindra, Alan Hayes, Alec Radford, et al. Gpt-4o system card. 
arXiv preprint arXiv:2410.21276, 2024. +Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024. +Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023. +MAA. American Invitational Mathematics Examination (AIME). https://maa.org/maa-invitational-competitions/, 2024. Accessed: 2025-03-25. +Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023. +Paul Joe Maliakel, Shashikant Ilager, and Ivona Brandic. Investigating energy efficiency and performance trade-offs in llm inference across tasks and dvfs settings. arXiv preprint arXiv:2501.08219, 2025. +Tyler McDonald, Anthony Colosimo, Yifeng Li, and Ali Emami. Can we afford the perfect prompt? balancing cost and accuracy with the economical prompting index. arXiv preprint arXiv:2412.01690, 2024. + +Meta-AI. Llama 3.3 70b instruct model, 2024. URL https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct. +Samuel Miserendino, Michele Wang, Tejal Patwardhan, and Johannes Heidecke. Swe-lancer: Can frontier llms earn $1 million from real-world freelance software engineering? arXiv preprint arXiv:2502.12115, 2025. +Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicolamaria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024. +OpenAI. Gpt-4o mini: Advancing cost-efficient intelligence, 2024. URL https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/. +OpenAI. Openai o3-mini system card, 2025. 
URL https://openai.com/index/o3-mini-system-card/. +OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quinonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan + +Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra 
Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan Thibault Sottiaux Thomas Degry Thomas Dimson Tianhao Zheng Timur Garipov Tom Stasi Trapit Bansal. Trevor Creech Troy Peterson Tyna Eloundou Valerie Qi,Vineet Kosaraju,Vinnie Monaco,Vitchyr Pong,Vlad Fomenko Weiyi ZhengWenda ZhouWes McCabe Wojciech ZarembaYann Dubois Yinghai LuYining Chen Young ChaYu BaiYuchen He,Yuchen Zhang,Yunyun Wang,Zheng Shao,and Zhuohan Li. Openai o1 system card2024. URL https://arxiv.org/abs/2412.16720. +Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. 
Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025. +Alicia Parrish, Angelica Chen, Nikita Nangia, Vishakh Padmakumar, Jason Phang, Jana Thompson, Phu Mon Htut, and Samuel Bowman. BBQ: A hand-built bias benchmark for question answering. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 2086-2105, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022. + +findings-acl.165. URL https://aclanthology.org/2022.findings-acl.165/. +David Rein. Can good benchmarks contain mistakes? NYU Alignment Research Group Blog, May 2024. Reveals GPQA expert pay (\(100/hr) and non-expert solve times (Rein, 2024). Online: wp.nyu.edu/...mistakes. +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof qa benchmark. In First Conference on Language Modeling, 2024. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling lIm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. +Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419, 2025. +TutorCruncher. Average tutoring rates use: How much do tutors charge per hour? TutorCruncher Blog, Feb 2025. Reports $45-$ 100/hr as typical range for test-prep tutoring (TutorCruncher, 2025). +Upwork. Data entry specialist hourly rates (cost to hire data entry specialist). Upwork Hiring Guide, 2025. Median $13/hr for data entry freelancers;$ 10–$20/hr typical range (Upwork, 2025). Accessed Mar 25, 2025. 
+Junlin Wang, Siddhartha Jain, Dejiao Zhang, Baishakhi Ray, Varun Kumar, and Ben Athiwaratkun. Reasoning in token economies: Budget-aware evaluation of llm reasoning strategies. arXiv preprint arXiv:2406.06461, 2024. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw. +Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms. arXiv preprint arXiv:2502.07266, 2025. + +Wyzant Tutoring. New jersey math tutors cost $33 -$ 55 per hour on average. Wyzant.com (tutoring rate listing), 2025. Average private tutoring rates for math (K-12 and competition) (Wyzant Tutoring, 2025). Accessed Mar 25, 2025. +Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025. +Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for ltm reasoning. arXiv preprint arXiv:2502.18080, 2025. +Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. In NeurIPS 2024 Datasets and Benchmarks Track, 2024. Reports human solve rate on GSM8K: 4 problems/15 min (3.7 min each) (Zhang et al., 2024). + +# A. 
Details of Human Expert Cost Estimation + +In this section, we introduce the detailed analysis of how the human expert costs in Table 3 are calculated per dataset. AIME (American Invitational Mathematics Examination) consists of 15 challenging math problems in a 3-hour contest (administered in two separate sections: AIME I & II), giving an average of about 12 minutes per problem (Art of Problem Solving, 2023). In practice, expert math tutors for competitions like AIME command high hourly fees in the range of $45 -$ 100, reflecting intensive test-preparation rates (TutorCruncher, 2025). This rate range aligns with specialized test prep tutoring in the US, which is higher than regular tutoring due to the advanced problem-solving skills required (TutorCruncher, 2025). At roughly 12 minutes per AIME question on average, a solver could handle about five such problems per hour under exam conditions (Art of Problem Solving, 2023). + +BBQ (Bias Benchmark for QA) contains short question-answer scenarios targeting social bias. Crowdworkers annotating BBQ have been paid around $15 per hour, a rate chosen to exceed U.S. minimum wage (Parrish et al., 2022). Because each task includes multiple BBQ questions, workers were able to answer roughly 5 questions in 2 minutes (Parrish et al., 2022) - i.e. ~24 seconds per question, or about 0.4 minutes per question. This fast per-question time reflects the fact that BBQ items are short multiple-choice queries, allowing a human annotator to complete approximately 150 BBQ questions in an hour at that pay rate (Parrish et al., 2022). + +GPQA Diamond consists of extremely difficult graduate-level science questions, so human experts demand high compensation. In one case, domain experts were paid about \(100 per hour to contribute and validate GPQA questions (Rein et al., 2024). 
These questions are "Google-proof" and time-consuming: skilled non-expert participants spent over 30-35 minutes on average per question when attempting to solve GPQA problems with unrestricted web access (Rein et al., 2024). This long duration per question underscores GPQA's complexity – at most 2 questions could be solved in an hour even by motivated annotators, which justifies the premium expert hourly rate (Rein, 2024). + +GSM8K contains grade-school level math word problems. Solving these is relatively time-efficient for adults: in one study, crowdworkers under time pressure managed to solve about 4.07 GSM8K problems in 15 minutes on average (Zhang et al., 2024). That corresponds to roughly 3.7 minutes per question for a human solver. The required skill is comparable to general math tutoring at the K-8 level, for which typical U.S. tutor rates are about $33 -$ 55 per hour on platforms like Wyzant (Wyzant Tutoring, 2025). At such a rate, paying a person to solve GSM8K problems would be economical, given that a proficient solver can complete + +approximately 16 questions in one hour (Zhang et al., 2024). + +MATH500 is a set of 500 advanced competition math problems (drawn from the harder tier of a larger MATH dataset). These problems are similar in difficulty to top-level contest questions such as late AIME or Olympiad qualifying problems. As with AIME, a well-prepared human might spend on the order of 10-15 minutes per problem, roughly $\sim$ 12 minutes on average for a hard competition question (Art of Problem Solving, 2023). Tutors capable of solving and teaching such Olympiad-level problems often charge rates on the order of $50 per hour (with a typical range of$ 35- $60 for competition math tutoring) (Wyzant Tutoring, 2025). This implies that solving roughly five MATH500 problems could cost about $50 and take around an hour, consistent with the per-question time and high skill required. 
+ +Two-Digit Addition consists of simple two-digit addition problems, which are very quick for humans to solve. Early elementary students are often expected to complete about 20-25 basic addition problems in one minute in "mad minute" drills (Fas, 2021). This corresponds to roughly 2-3 seconds per addition (0.04 minutes per question). Because the task is so elementary, the labor to solve large numbers of such problems can be valued at a lower hourly rate. Simple data-entry style work or basic math tasks on freelance platforms pay on the order of $10 -$ 20 per hour (Upwork, 2025). At $15/hour, for example, a worker could theoretically solve several hundred 2-digit additions within the hour, given the ~3-second average solution time (Fas, 2021). + +# B. Details of Evaluation + +For each dataset in our evaluation, we sample up to 128 instances and run each model $n = 8$ times to estimate the expected runtime cost and accuracy per sample. For all models except OpenAI's reasoning models, we set the temperature to 0.7 and top_p to 1.0. In the case of OpenAI's reasoning models, we use a temperature of 1.0 and do not apply top_p. Additionally, we use the default maximum token generation limits provided by each model. Per sample, we employ a concise but descriptive instruction prompt for the models to follow. + +In our experiments, we define the pass $r_m(p)$ as whether the model obtains a correct answer after a single run or not (0 or 1), and the cost $c_m(p)$ as: + +$$ +c _ {m} (p) = n _ {\text {i n}} (m, p) \cdot c _ {\text {i n}} (m) + n _ {\text {o u t}} (m, p) \cdot c _ {\text {o u t}} (m) \tag {14} +$$ + +where $n_{*}(m,p)$ denotes the number of input / output tokens consumed / generated by the model $m$ on problem $p$ , and $c_{*}(m)$ denotes the dollar costs per input / output tokens consumed / generated by the model $m$ (see Table 4 for the pricing). 
For the expert costs, we utilize the estimations from Table 3, and set the rates to the upper-bound value to ensure the approximation of the expert accuracy being 1. + +
DatasetQualification RequirementsHourly RateTime per QuestionEst. Cost
AIMEAdvanced high-school contest math skills$45–$100~12 minutes$9–$20
BBQGeneral familiarity with social biases$15~0.4 minutes (24 sec)$0.10
GPQA Dia.Graduate-level domain expertise$100~35 minutes$58
GSM8KBasic arithmetic reasoning$33–$55~3.7 minutes$2–$3.50
MATH500Strong competition-level problem-solving$35–$60~12 minutes$7–$12
Two-Digit Add.Basic numeracy$10–$20~0.04 minutes (3 sec)$0.01–$0.02
+ +Table 3: Estimated costs of hiring a human expert to solve one question from each dataset, based on typical qualifications, hourly rates, and time per question. + +# Experiment Prompt + +```txt +Please solve the following question. You can explain your solution before presenting the final answer. Format your final answer as: ... Instructions: - For multiple-choice: Give only the letter (e.g., (A)). - For numeric: Give only the number (e.g., 42). - For free-response: Provide the full final answer text. INPUT: , , {input} , +``` + +# C. Additional Results + +# C.1. Expected Accuracy and Inference Costs + +As discussed in the Section 3.2, we share the results of expected cost and accuracy per model per dataset. We can observe the skewed preference of a particular model family under each metric, implying the inability of expressing economic impact of models through these metrics solely. + +# C.2. Relative Gain per Model Release + +Figure 4 presents the relative improvement in temporal frontier cost-of-pass for each model release, illustrated using bar plots. Namely, we calculate: + +$$ +\frac {G _ {p \sim D} \left(\left\{m _ {t} \right\} , \mathcal {M} _ {t - 1}\right)}{V _ {p \sim D} \left(\mathcal {M} _ {t - 1}\right)} \tag {15} +$$ + +The results indicate that the reasoning models demonstrate notable advancements, particularly on complex quantitative tasks. In contrast, lightweight models exhibit marked gains on basic tasks. These findings support the observations from our experiments (Sections 3.2, 3.4). Notably, The substantial improvement observed for GPT-4o is likely due to it being the first model included in our analysis, resulting in a pronounced leap relative to the baseline cost associated with human expert annotation. + +# C.3. 
Counterfactual Frontier Cost-of-Pass in the Absence of a Single Model + +In this section, following the methodology outlined in Section 3.4, we quantify the relative improvement in frontier cost-of-pass using a counterfactual approach. Specifically, for each model $m_{*}$ , we calculate the following: + +$$ +\frac {G _ {p \sim D} \left(\left\{m _ {*} \right\} , \mathcal {M} _ {T} \backslash \left\{m _ {*} \right\}\right)}{V _ {p \sim D} \left(\mathcal {M} _ {T} \backslash \left\{m _ {*} \right\}\right)}, \tag {16} +$$ + +quantifying the essentialness of the model $m_*$ . The results presented in Figure 5 demonstrate that the contributions of most individual models are largely compensable by the remaining models. Furthermore, we observe a similar coarse-level trend, as noted in Section 3.4, indicating that different model families provide greater benefits in specific task categories. + +# D. Limitations of Our Framework and Future Work Directions + +In this section, we acknowledge the limitations of the presented framework and propose directions for future improvements and extensions. + +A primary limitation pertains to our definitions and computations of cost $(C_p(m))$ and performance $(R_{p}(m))$ . Specifically, our current cost computation considers only input and output token costs as proxies for the total expense incurred in obtaining correct outputs. This approach neglects indirect or overhead costs associated with generating incorrect outputs, such as subsequent verification costs. Regarding per + +
CategoryModelRelease DateCost (per million tokens)
Input TokensOutput Tokens
Lightweight ModelsLlama-3.1-8B7/23/2024$0.18$0.18
GPT-4o Mini7/18/2024$0.15$0.60
Llama-3.3-70B12/6/2024$0.88$0.88
Large ModelsLlama-3.1-405B7/23/2024$3.50$3.50
GPT-4o5/13/2024$2.50$10.00
Claude Sonnet-3.56/20/2024$3.00$15.00
Reasoning ModelsOpenAI o1-mini9/12/2024$1.10$4.40
OpenAI o3-mini1/31/2025$1.10$4.40
DeepSeek-R11/20/2025$7.00$7.00
OpenAI o112/5/2024$15.00$60.00
+ +Table 4: Per-token inference costs with release dates. + +
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B89.4575.7821.4817.8737.3012.50
GPT-4o mini99.9088.5753.3218.0770.0214.58
Llama-3.3-70B99.9092.0985.0646.4872.7533.33
Large Models
Llama-3.1-405B99.7193.9585.7444.1467.8731.67
Claude Sonnet-3.5100.0094.4392.5855.3764.7515.83
GPT-4o99.7191.9990.0447.0773.1414.58
Reasoning Models
OpenAI o1-mini99.5192.5885.7449.1285.9453.33
OpenAI o1100.0094.0495.0273.8389.4572.50
DeepSeek-R1100.0093.3683.6954.8893.8560.83
OpenAI o3-mini100.0092.7783.7971.6888.5777.08
+ +Table 5: Accuracy (%) per model per dataset: ${R}_{m}\left( {p \sim D}\right)$ . In each column,the 3 entries with the highest accuracy have blue highlights. + +
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.2e-57.4e-55.2e-51.8e-41.5e-42.2e-4
GPT-4o mini5.4e-51.9e-41.0e-43.9e-43.7e-45.6e-4
Llama-3.3-70B1.6e-43.3e-43.1e-49.6e-46.7e-41.1e-3
Large Models
Llama-3.1-405B6.9e-41.4e-31.0e-33.0e-32.4e-33.7e-3
Claude Sonnet-3.52.1e-33.7e-33.0e-36.9e-35.9e-37.5e-3
GPT-4o2.3e-34.5e-32.7e-30.018.7e-30.01
Reasoning Models
OpenAI o1-mini5.4e-38.4e-37.6e-30.020.020.07
OpenAI o10.020.030.040.250.130.52
DeepSeek-R11.8e-35.1e-34.6e-30.040.010.04
OpenAI o3-mini1.1e-32.1e-32.6e-30.015.4e-30.02
+ +Table 6: Dollar cost incurred per model per dataset: ${C}_{m}\left( {p \sim D}\right)$ . In each column,the 3 entries with the lowest cost have blue highlights. + +![](images/69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg) + +![](images/d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg) + +![](images/76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg) + +![](images/d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg) +Figure 4: Bar plot showing the percentage of change in frontier cost-of-pass per model release (i.e. $\frac{G_{p\sim D}(\{m_t\},\mathcal{M}_{t-1})}{V_{p\sim D}(\mathcal{M}_{t-1})}$ ) + +![](images/0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg) + +![](images/d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg) + +formance, the use of accuracy as a binary success-or-failure metric presupposes the existence of a reliable verification pipeline and a practical decision mechanism, potentially oversimplifying scenarios where these assumptions do not hold. Additionally, our cost-of-pass metric, which combines cost and performance, currently does not account for variance information, limiting its practical interpretability in situations where two scenarios with similar cost-of-pass values exhibit substantially different variances. Furthermore, from a practical standpoint, cost modeling could consider alternative units (e.g., latency, inference time, FLOPs), which are currently not analyzed. + +Nevertheless, a significant strength of our framework is its abstract and modular design, facilitating extensions to address these limitations. Future work can enhance the precision of cost computations by integrating additional cost factors, such as verification overheads or indirect costs. Moreover, the framework could be adapted to alternative resource-consumption metrics like latency, inference time, or FLOPs. 
Regarding performance evaluation, the binary accuracy metric could be replaced or supplemented with alternative success measures tailored to specific scenarios, especially those emphasizing a particular balance between performance and cost. Incorporating variance and other statistical information into cost and performance calculations + +could also enhance practical usability and interpretability. + +An additional limitation lies in the evaluation methodology, particularly regarding human expert cost estimation. Our framework assumes that experts can reliably solve tasks given sufficient conditions (e.g., adequate qualifications, time, compensation). However, this assumption may not hold for particularly challenging problems or datasets with inherently high uncertainty in achieving correct solutions. Future research could address this limitation by conducting rigorous human subject studies to empirically evaluate and incorporate expert performance variability into the cost estimation process. + +![](images/376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg) +Figure 5: The relative improvement $(\%)$ in frontier cost-of-pass under a counterfactual setting, removing a model $m_*$ from the model set $\mathcal{M}_T$ . High values mean that the model is essential for maintaining the current frontier. 
\ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13359/images/0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg b/data/2025/2504_13xxx/2504.13359/images/0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10f46b7e3ed27a68ba6e2cf33617d961304977ed --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7afe7f017e0a3ebe84ecb7a4021dc15b9d8e7c52e87da697d71f2ef00a90e82e +size 22056 diff --git a/data/2025/2504_13xxx/2504.13359/images/0724a272f5ed5babd39fe4257477bdcaf81aca618d51c9fbee49416a8902611d.jpg b/data/2025/2504_13xxx/2504.13359/images/0724a272f5ed5babd39fe4257477bdcaf81aca618d51c9fbee49416a8902611d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32e4e6e586a9add0562fc89d003a24e90665f12f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/0724a272f5ed5babd39fe4257477bdcaf81aca618d51c9fbee49416a8902611d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b80e6abcde28b3b899a2683e221034d62587d1db221e8042f661c3bd80aed24 +size 3030 diff --git a/data/2025/2504_13xxx/2504.13359/images/0ab620610ec8223db6b8994ced376a3e4057ef19e3c156b08c2e461f4d2866e2.jpg b/data/2025/2504_13xxx/2504.13359/images/0ab620610ec8223db6b8994ced376a3e4057ef19e3c156b08c2e461f4d2866e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02e294baa08121a08d65b01d10ea249025d15760 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/0ab620610ec8223db6b8994ced376a3e4057ef19e3c156b08c2e461f4d2866e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36623ca21c4709af4c4574c2bfa57d87193cc09b6ead34542096bef9d87dd171 +size 4191 diff --git 
a/data/2025/2504_13xxx/2504.13359/images/13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg b/data/2025/2504_13xxx/2504.13359/images/13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e12b1c75b7efbf1602d17140b343694a78afc0c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ff3cf260ba02827bc00146fb926d43d8caf5557ac57c88c01ba8bfb5afce388 +size 16976 diff --git a/data/2025/2504_13xxx/2504.13359/images/16bb05ff693bf5ab03e08b00cbae1fffb84268ea796fdc389e85b0fd26e7e712.jpg b/data/2025/2504_13xxx/2504.13359/images/16bb05ff693bf5ab03e08b00cbae1fffb84268ea796fdc389e85b0fd26e7e712.jpg new file mode 100644 index 0000000000000000000000000000000000000000..850efa4db3e0defede1d3947c59fcb21fadfe365 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/16bb05ff693bf5ab03e08b00cbae1fffb84268ea796fdc389e85b0fd26e7e712.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db17eeab55a386b5b6ffc61f5941605b1a5600222b4f1761a9131d28a35c2f6c +size 108265 diff --git a/data/2025/2504_13xxx/2504.13359/images/1d8be8a5cde6ec3465198b80b6ba7628f0969f0f90b7df215617d85fa21b7dab.jpg b/data/2025/2504_13xxx/2504.13359/images/1d8be8a5cde6ec3465198b80b6ba7628f0969f0f90b7df215617d85fa21b7dab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3792f0f20bbe34ce6a549c08ab1847fafa62be29 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/1d8be8a5cde6ec3465198b80b6ba7628f0969f0f90b7df215617d85fa21b7dab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da60f6ef911a5d2b4ea3277ea1186e45b3bf7b097ce4e528886e285a2001e11 +size 4544 diff --git a/data/2025/2504_13xxx/2504.13359/images/326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg 
b/data/2025/2504_13xxx/2504.13359/images/326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bad49128f108a4b94f90f212e4f93431a942f48 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58c2b60a6a50fbe530990e5a44e653b5509c9671f6731c05a4d9e0624651508a +size 17166 diff --git a/data/2025/2504_13xxx/2504.13359/images/3499681d1dd7b5d2dc62410a01e17888d372d6c157900ebd5c05d64966620b55.jpg b/data/2025/2504_13xxx/2504.13359/images/3499681d1dd7b5d2dc62410a01e17888d372d6c157900ebd5c05d64966620b55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43ffb87228483564d3e2e22ff65cf78d1d2a4379 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/3499681d1dd7b5d2dc62410a01e17888d372d6c157900ebd5c05d64966620b55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12552c71f34b03fd1efc5692414f68e843a6dcd809ec51a125240f54bad5398b +size 4501 diff --git a/data/2025/2504_13xxx/2504.13359/images/376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg b/data/2025/2504_13xxx/2504.13359/images/376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6da047fe4af0a17babe216c815ee3f084b3a19ca --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75b077a0787d37bc784b815b7f4380dbb0fcea44f2a3d334aab1d9ecf06b14c0 +size 86548 diff --git a/data/2025/2504_13xxx/2504.13359/images/3a562bbe1004e7cf970e8b8277eea3ed839c8f64724ec353dd49ba5d688790e0.jpg b/data/2025/2504_13xxx/2504.13359/images/3a562bbe1004e7cf970e8b8277eea3ed839c8f64724ec353dd49ba5d688790e0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..14d127ce2bd04abf98e236ef41f1a28f589126e2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/3a562bbe1004e7cf970e8b8277eea3ed839c8f64724ec353dd49ba5d688790e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f62696a53e5bb1f4d286c70705d6e68b160bb63dada27192387eadb7070c80c7 +size 75194 diff --git a/data/2025/2504_13xxx/2504.13359/images/3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg b/data/2025/2504_13xxx/2504.13359/images/3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c7287787ec251023138a2b2fa81116d7e3b947c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2faf1d30e20165d53638fff766a8968aa05fbe3b7bd047ac396c31599a76c981 +size 16664 diff --git a/data/2025/2504_13xxx/2504.13359/images/47be0a10772b39136d58f63b010cf469741ce68516e4b1f263aca909dff5a51e.jpg b/data/2025/2504_13xxx/2504.13359/images/47be0a10772b39136d58f63b010cf469741ce68516e4b1f263aca909dff5a51e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cec161628d14c3450b6cfba449b37dc46f4483a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/47be0a10772b39136d58f63b010cf469741ce68516e4b1f263aca909dff5a51e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f8ed0bee9929117a1e63b1e32e38d69116be52ccf2a596df08a09d0a17fec77 +size 4306 diff --git a/data/2025/2504_13xxx/2504.13359/images/482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg b/data/2025/2504_13xxx/2504.13359/images/482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7272880c89bc199e20282dcd0ed982b7c375dc3a --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13359/images/482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:702cf1dfa05999aab363eddec66c793d70da09664b0dc2118b10e6b9eb391b7b +size 16359 diff --git a/data/2025/2504_13xxx/2504.13359/images/4b8807ce16c96c9c219fb85d28b143bb77eeb299eced28692be856c6c02086e6.jpg b/data/2025/2504_13xxx/2504.13359/images/4b8807ce16c96c9c219fb85d28b143bb77eeb299eced28692be856c6c02086e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7df5832a88d8eefc9b9b10f11b81994ff0093e2a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/4b8807ce16c96c9c219fb85d28b143bb77eeb299eced28692be856c6c02086e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85e3f4f82b7802b81f587374a1af04045d319f48610d08f3b356f1c29e6654b2 +size 5453 diff --git a/data/2025/2504_13xxx/2504.13359/images/4ca0534a6db9b082749f414aa1909bd73d9de393fc00c36500f0aa490c7c2780.jpg b/data/2025/2504_13xxx/2504.13359/images/4ca0534a6db9b082749f414aa1909bd73d9de393fc00c36500f0aa490c7c2780.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92a298da624fdf4a6667ac9af47b68657f6336ce --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/4ca0534a6db9b082749f414aa1909bd73d9de393fc00c36500f0aa490c7c2780.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d87036a52e35a4a43a9c30fb8af0382e3291f9332ccdc7181492cf92d536816 +size 3380 diff --git a/data/2025/2504_13xxx/2504.13359/images/50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg b/data/2025/2504_13xxx/2504.13359/images/50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53ac6bf8e166a45c5745d1766560edcb8a1d2283 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ac00553d9c58831e4b91339c93bdd3365e133b746fe737f6dbb222fe5f7204f2 +size 16294 diff --git a/data/2025/2504_13xxx/2504.13359/images/522c2601cc01bbc0a0ebbd3ad816b7d64f965dcbdf9ec52f20e4ef99b0dd25fb.jpg b/data/2025/2504_13xxx/2504.13359/images/522c2601cc01bbc0a0ebbd3ad816b7d64f965dcbdf9ec52f20e4ef99b0dd25fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1543839c3dca8100c16ac84489dbea7175dff1ff --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/522c2601cc01bbc0a0ebbd3ad816b7d64f965dcbdf9ec52f20e4ef99b0dd25fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5063910d6d6f0886070e88d2d19407947b551e484c0705153d0681659066f973 +size 92845 diff --git a/data/2025/2504_13xxx/2504.13359/images/630111621fe5a87da45a6dc5cf0ef2e8173bf3a731e83a851a6919989be45eec.jpg b/data/2025/2504_13xxx/2504.13359/images/630111621fe5a87da45a6dc5cf0ef2e8173bf3a731e83a851a6919989be45eec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed452efab458a465239792f6467278bbeaef21f8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/630111621fe5a87da45a6dc5cf0ef2e8173bf3a731e83a851a6919989be45eec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1840e431f116b2a336902820b8e7aa51d13be4a7196a02945c249c347f9b1f86 +size 118983 diff --git a/data/2025/2504_13xxx/2504.13359/images/656223ea2ac1042dfb13629ea5090a3c50b8b896f40895d187fa7bff7f374bc2.jpg b/data/2025/2504_13xxx/2504.13359/images/656223ea2ac1042dfb13629ea5090a3c50b8b896f40895d187fa7bff7f374bc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8510fe086234b1097dc788e647bb0523d3d81274 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/656223ea2ac1042dfb13629ea5090a3c50b8b896f40895d187fa7bff7f374bc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ccd9978082d820e474a51c103e6736952b7ebad8ed3a4affa8bcead9cd66b47 +size 5977 diff --git 
a/data/2025/2504_13xxx/2504.13359/images/662c2beaad31b19c239610a14b9523bf0e990241e88f3fcbe90b760c709f7e3c.jpg b/data/2025/2504_13xxx/2504.13359/images/662c2beaad31b19c239610a14b9523bf0e990241e88f3fcbe90b760c709f7e3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9936d62a277a0c522600d27633b918ade1417953 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/662c2beaad31b19c239610a14b9523bf0e990241e88f3fcbe90b760c709f7e3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79f709246ac2d60a0455362a8fd5c23e437bc0d239abbeeda55e0afa21d8fd54 +size 6047 diff --git a/data/2025/2504_13xxx/2504.13359/images/69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg b/data/2025/2504_13xxx/2504.13359/images/69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..783ad68e94a5bc1fda154488e0e91eb152180676 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e8ce073f1ff762369b661598708f02f9c7dd96527166f1cabe60c7c4b88fb5 +size 21047 diff --git a/data/2025/2504_13xxx/2504.13359/images/6a7b13390796c6478613ca64b9cf10c4a3baf8f8a05b9b4e501d16514ff5c7b5.jpg b/data/2025/2504_13xxx/2504.13359/images/6a7b13390796c6478613ca64b9cf10c4a3baf8f8a05b9b4e501d16514ff5c7b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f6c6e0e269428b3bcf632e74756e15596a52cd8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/6a7b13390796c6478613ca64b9cf10c4a3baf8f8a05b9b4e501d16514ff5c7b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5155dc730c28a358a74732638f127e9196208290ac8a5c915cec047b8d0d716 +size 4584 diff --git a/data/2025/2504_13xxx/2504.13359/images/7669a7a3ba5af13d09f43461712be4673c3d790558add3473d751fed207038d0.jpg 
b/data/2025/2504_13xxx/2504.13359/images/7669a7a3ba5af13d09f43461712be4673c3d790558add3473d751fed207038d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc45e8ae3d048d81ec99b95543c91f8d9b331e44 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/7669a7a3ba5af13d09f43461712be4673c3d790558add3473d751fed207038d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bae1ebc3b58ebe43a8aef00233e1993cad1fdd62cd3a8649eef20ade95c1513 +size 6851 diff --git a/data/2025/2504_13xxx/2504.13359/images/76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg b/data/2025/2504_13xxx/2504.13359/images/76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a56f671aa4856bda770f3c00b32aeb64113d7e7d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bc51b8dff085847bda2d25ed39fa942f5826e3505f712a33d5c487cdec6b73a +size 20900 diff --git a/data/2025/2504_13xxx/2504.13359/images/832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg b/data/2025/2504_13xxx/2504.13359/images/832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0fe9b767bd1c8f39e65c260b4132cc9dce640dc --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc151298ebacf6b759c0c86c21deadd2db67ad9838eb98cd1fb113ad4bfac923 +size 17212 diff --git a/data/2025/2504_13xxx/2504.13359/images/869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg b/data/2025/2504_13xxx/2504.13359/images/869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..fb73a1f34c7b4742cce23fa95b370211336b3166 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1448a6a26bde4331abcf273dfb6add47e65196140239395b5fbcfc0c65990578 +size 15799 diff --git a/data/2025/2504_13xxx/2504.13359/images/8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg b/data/2025/2504_13xxx/2504.13359/images/8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f7558205e1ac723f4ca6768bb67b8f1c306885c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f7382c852e4876d0fe9dfbe81a3e92fd66c7db83587e473a31e1e00a5ab9c9b +size 13912 diff --git a/data/2025/2504_13xxx/2504.13359/images/929a09e0fe5367a1f0d28cf97f7fb951feaaade98d98308c1806d8f0b0911abf.jpg b/data/2025/2504_13xxx/2504.13359/images/929a09e0fe5367a1f0d28cf97f7fb951feaaade98d98308c1806d8f0b0911abf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c40b87b6ff7cb7b473811b1e1bc1e08d434e6a5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/929a09e0fe5367a1f0d28cf97f7fb951feaaade98d98308c1806d8f0b0911abf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e4f70f7c9b14194e46bbc70f4acae9da3a5b0ca2e57cebe95af3ecb6586198f +size 5660 diff --git a/data/2025/2504_13xxx/2504.13359/images/9a8f99b249a9f5e30425b3deabcfed43f74023c4cde283f0657c48f6b00e62e5.jpg b/data/2025/2504_13xxx/2504.13359/images/9a8f99b249a9f5e30425b3deabcfed43f74023c4cde283f0657c48f6b00e62e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7dc1fde5a5187bf425cbb64f31adb205bfe11c2 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13359/images/9a8f99b249a9f5e30425b3deabcfed43f74023c4cde283f0657c48f6b00e62e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3da89cf6b3fc90d635edb8c2c4cb4aa6fbfa91bedadceff31377b91cc8961d51 +size 6182 diff --git a/data/2025/2504_13xxx/2504.13359/images/9d53b8cd44a84eba0df1eeb76dcbe7ecc97671c12f98e669fc6a7ce56b38edfa.jpg b/data/2025/2504_13xxx/2504.13359/images/9d53b8cd44a84eba0df1eeb76dcbe7ecc97671c12f98e669fc6a7ce56b38edfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d3fd60515220a8f5e1c61cc7a226ca9a62c848a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/9d53b8cd44a84eba0df1eeb76dcbe7ecc97671c12f98e669fc6a7ce56b38edfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2dc5f8ccfcb7de7ff1104a17371c1e3db369af583f992a31151fed16fc992ea +size 4131 diff --git a/data/2025/2504_13xxx/2504.13359/images/9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg b/data/2025/2504_13xxx/2504.13359/images/9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28430362f3147e1ba647ae41111800425a150cb4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fae9bb429bde872df287acb8138118ca76103001de56d10fe3d7dd532ba7250b +size 38717 diff --git a/data/2025/2504_13xxx/2504.13359/images/af65fcac32682b6e5ca9b5ec10bb48eec299698dbff86670f68f02431e483ce3.jpg b/data/2025/2504_13xxx/2504.13359/images/af65fcac32682b6e5ca9b5ec10bb48eec299698dbff86670f68f02431e483ce3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8418859b5af541b1f4ccde04d3e84f07f66cc9c0 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/af65fcac32682b6e5ca9b5ec10bb48eec299698dbff86670f68f02431e483ce3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d04a25a9e56f328a2e1c547cc701b23e73f5a97c8e9baa50b6293f9dc0a027d6 +size 69172 diff --git a/data/2025/2504_13xxx/2504.13359/images/b96bef1b68e8cefc8bc75f2d79919123bd348ee3bb4a32c8bf3c69c8ae65d66d.jpg b/data/2025/2504_13xxx/2504.13359/images/b96bef1b68e8cefc8bc75f2d79919123bd348ee3bb4a32c8bf3c69c8ae65d66d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32485f197f3dfd9bc7cbf1bdd5192a14f7bd756e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/b96bef1b68e8cefc8bc75f2d79919123bd348ee3bb4a32c8bf3c69c8ae65d66d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1938e65b812ad01096b54fb59cab2b0e28d4ba0895ef88bb5dd7a65ef4c7cb61 +size 2904 diff --git a/data/2025/2504_13xxx/2504.13359/images/c99d5a6c57a920288412adc207ba75223db787139378e8b8e109f1e5f1a56eea.jpg b/data/2025/2504_13xxx/2504.13359/images/c99d5a6c57a920288412adc207ba75223db787139378e8b8e109f1e5f1a56eea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f072943f10eab47f3fa4e644aa34e874e022ad03 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/c99d5a6c57a920288412adc207ba75223db787139378e8b8e109f1e5f1a56eea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5626a6831cfcac8be4c7415f5b6d8920a67ffebbf4ad18e5ec3594d0ccd8e5ed +size 6541 diff --git a/data/2025/2504_13xxx/2504.13359/images/d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg b/data/2025/2504_13xxx/2504.13359/images/d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3da74846d5e1792208e6f48c7d809fb92c771fb8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bf64a549de30b955bb46e245f41d523c365c1bfc5174094042f008ab897bd7a +size 19977 diff --git 
a/data/2025/2504_13xxx/2504.13359/images/d46c8433e08f0b502f2ad703fc5d518910f28451186239de2344b2aae522e61c.jpg b/data/2025/2504_13xxx/2504.13359/images/d46c8433e08f0b502f2ad703fc5d518910f28451186239de2344b2aae522e61c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb1dd470f0d40f2ed22adfc8a0fc596d710535ba --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d46c8433e08f0b502f2ad703fc5d518910f28451186239de2344b2aae522e61c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:362008abcbd9b3ebe63caa2394b1e4ae001e925a930f4a3a818288b3cc63bde1 +size 4901 diff --git a/data/2025/2504_13xxx/2504.13359/images/d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg b/data/2025/2504_13xxx/2504.13359/images/d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f53c3e1b87201470c6bcc25b000ced507b058b3 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18a0b9958550b77e6a1fa83f33e9f4868384b5433b980057b655c94a8b4c3ecf +size 22213 diff --git a/data/2025/2504_13xxx/2504.13359/images/d5d1dd0f4fce7f531bc4229a090b9b204042e706c64543dd30f1b2553c279874.jpg b/data/2025/2504_13xxx/2504.13359/images/d5d1dd0f4fce7f531bc4229a090b9b204042e706c64543dd30f1b2553c279874.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b25377723938a054c6e4ded31220e3c39a11646 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d5d1dd0f4fce7f531bc4229a090b9b204042e706c64543dd30f1b2553c279874.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8318f1a3a526c35eff15f82043a0866c8acde14ee277b968b093c7884b8f24 +size 35945 diff --git a/data/2025/2504_13xxx/2504.13359/images/d6ab12161c3f30622480c922bd2826ffd09e7b876cd0d4635e04bbc52454d303.jpg 
b/data/2025/2504_13xxx/2504.13359/images/d6ab12161c3f30622480c922bd2826ffd09e7b876cd0d4635e04bbc52454d303.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6a2bf075af9d4294c2506924eac7d00b062fa5b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d6ab12161c3f30622480c922bd2826ffd09e7b876cd0d4635e04bbc52454d303.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6691ddc34b1480f61dea8b499e0a46fc9e09bda4fc8479191d6e7eeb3eeda32 +size 4541 diff --git a/data/2025/2504_13xxx/2504.13359/images/d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg b/data/2025/2504_13xxx/2504.13359/images/d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5886d317de0aab2a7db196c5f84a9ead99d80e3e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3f5aa6125fd1377424512df41b304f1b098b57b886ef8859eb7f40c5367f860 +size 18964 diff --git a/data/2025/2504_13xxx/2504.13359/images/e80adc147208c7a870476ff72078de8f24d2604feef4f8aab2fc804c0c08726b.jpg b/data/2025/2504_13xxx/2504.13359/images/e80adc147208c7a870476ff72078de8f24d2604feef4f8aab2fc804c0c08726b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16617dbbf0e656775fbe8ebb314a6c9b2d94c037 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/e80adc147208c7a870476ff72078de8f24d2604feef4f8aab2fc804c0c08726b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a25829c724b6b399b62e9fb4a0e480ecd6503f01639625592dabba5b8e21779c +size 7293 diff --git a/data/2025/2504_13xxx/2504.13359/images/ede810bce160eb35530e054ffba7c60f1bf8fe524c89eebf58828bb29e75b3a5.jpg b/data/2025/2504_13xxx/2504.13359/images/ede810bce160eb35530e054ffba7c60f1bf8fe524c89eebf58828bb29e75b3a5.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..071540441bc29d30fdf1399ccdfc59d51a6a977e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/ede810bce160eb35530e054ffba7c60f1bf8fe524c89eebf58828bb29e75b3a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff126eebe0c8c791e9a1415b301adc446d0a2c5ba14e05329463ee81ac997a67 +size 3477 diff --git a/data/2025/2504_13xxx/2504.13359/images/f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg b/data/2025/2504_13xxx/2504.13359/images/f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d110ddcdb7c4b3c9d7545a9a88a1daee7ae105d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35805903ab1714d77f6757813cb1bfb765bfd5a70c9e15f816cfa65a50d162f1 +size 13106 diff --git a/data/2025/2504_13xxx/2504.13359/images/fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg b/data/2025/2504_13xxx/2504.13359/images/fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28e61750d55bbc5bb999d1f9dabe234a021e7212 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/images/fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad762e1147b9f82718ff03576cb0b2020157d71d204f2d83e5d6f53ea3ce4749 +size 14963 diff --git a/data/2025/2504_13xxx/2504.13359/layout.json b/data/2025/2504_13xxx/2504.13359/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e5ca0e330a7873f98833a966fdf98751682d7f7e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13359/layout.json @@ -0,0 +1,13278 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 71, + 87, + 523, + 104 + ], + "type": "title", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 87, + 523, + 104 + ], + "spans": [ + { + "bbox": [ + 71, + 87, + 523, + 104 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 140, + 488, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 488, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 488, + 153 + ], + "type": "text", + "content": "Mehmet Hamza Erol* 1 Batu El* 1 Mirac Suzgun* 1 Mert Yuksekgonul† 1 James Zou† 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 148, + 175, + 196, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 175, + 196, + 187 + ], + "spans": [ + { + "bbox": [ + 148, + 175, + 196, + 187 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 199, + 272, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 199, + 272, + 656 + ], + "spans": [ + { + "bbox": [ + 72, + 199, + 272, + 656 + ], + "type": "text", + "content": "The widespread adoption of AI systems in the economy hinges on their ability to generate economic value that outweighs their inference costs. Evaluating this tradeoff requires metrics that account for both performance and costs. We propose a framework grounded in production theory for evaluating language models by combining accuracy and inference cost. We introduce cost-of-pass, the expected monetary cost of generating a correct solution. We then define the frontier cost-of-pass as the minimum cost-of-pass achievable across available models or the human-expert, using the approximate cost of hiring an expert. Our analysis reveals distinct economic insights. 
First, lightweight models are most cost-effective for basic quantitative tasks, large models for knowledge-intensive ones, and reasoning models for complex quantitative problems, despite higher per-token costs. Second, tracking this frontier cost-of-pass over the past year reveals significant progress, particularly for complex quantitative tasks where the cost has roughly halved every few months. Third, to trace key innovations driving this progress, we examine counterfactual frontiers—estimates of cost-efficiency without specific model classes. We find that innovations in lightweight, large, and reasoning models have been essential for pushing the frontier in basic quantitative, knowledge-intensive, and complex quantitative tasks, respectively. Finally, we assess the cost-reductions afforded by common inference-time techniques like majority voting and self-refinement, finding that their marginal accuracy gains rarely justify their costs. Our findings underscore that complementary model-level innovations are the primary drivers of cost-efficiency, and our economic framework provides a principled tool for measuring this progress and guiding deployment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 175, + 385, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 175, + 385, + 186 + ], + "spans": [ + { + "bbox": [ + 306, + 175, + 385, + 186 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 194, + 543, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 194, + 543, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 194, + 543, + 506 + ], + "type": "text", + "content": "The recent progress in generative AI, particularly language models (LMs), has sparked significant interest in their potential to transform industries, automate cognitive tasks, and reshape economic productivity (Brynolfsson et al., 2025; Eloundou et al., 2024; Acemoglu, 2024). The widespread adoption of these AI systems in the economy hinges on whether the economic benefits generated by the tasks they can perform outweigh the associated inference costs, and whether those inference costs are lower than the cost of equivalent human labor. Consequently, two priorities have emerged at the forefront of LM research: advancing capabilities and reducing costs. These goals, however, often involve trade-offs with more powerful models or test-time techniques that offer higher accuracy at the expense of greater computational and monetary cost (Chen et al., 2024; Parashar et al., 2025; Madaan et al., 2023; Wang et al., 2023; Kapoor et al., 2024). While standard metrics capture accuracy or other system capabilities, they fail to account for cost, leading to an incomplete picture of progress. Ultimately, what matters to the users is not just raw capability, but the value delivered relative to cost and the standard has been to interpret and report these separately. As the ecosystem of models grows, it is essential to assess new models not in isolation, but in the context of a broader ecosystem, where marginal improvements may or may not justify higher costs, and do so in an easy-to-interpret manner." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 511, + 544, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 544, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 544, + 715 + ], + "type": "text", + "content": "To systematically investigate the trade-off between cost and performance and analyze the LM ecosystem as a whole, we draw insights from a well-established and foundational framework from economics: production frontiers. Economists have long studied these frontiers, which map a set of inputs to the maximum output attainable under a given technology (Farrell, 1957). In Farrell's original formulation, a producer is technically efficient if no input can be reduced without lowering output, and price efficient if the input mix minimizes cost given input prices. Together, these conditions yield the lowest possible cost per unit of output. Extending this framework, Aigner et al. (1977) introduced stochastic frontier production functions, in which the relationship between inputs and output is modeled as stochastic rather than deterministic, practically accounting for potential defective outputs that do not pass evaluation criteria due to factors beyond the producer's control." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.13359v1 [cs.AI] 17 Apr 2025" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 673, + 132, + 683 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 132, + 683 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 132, + 683 + ], + "type": "text", + "content": "\\*Co-first authors." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 684, + 139, + 694 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 684, + 139, + 694 + ], + "spans": [ + { + "bbox": [ + 67, + 684, + 139, + 694 + ], + "type": "text", + "content": "†Co-senior authors." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "type": "text", + "content": "Stanford University. " + }, + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "type": "inline_equation", + "content": "\\boxtimes" + }, + { + "bbox": [ + 67, + 695, + 287, + 706 + ], + "type": "text", + "content": " {mhamza, jamesz}@stanford.edu." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 707, + 264, + 716 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 707, + 264, + 716 + ], + "spans": [ + { + "bbox": [ + 67, + 707, + 264, + 716 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 67, + 707, + 264, + 716 + ], + "type": "text", + "content": "https://github.com/mhamzaerol/Cost-of-Pass." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 96, + 79, + 165, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 79, + 165, + 95 + ], + "spans": [ + { + "bbox": [ + 96, + 79, + 165, + 95 + ], + "type": "text", + "content": "Concepts" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 72, + 114, + 188, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 114, + 188, + 134 + ], + "spans": [ + { + "bbox": [ + 72, + 114, + 188, + 134 + ], + "type": "text", + "content": "Cost-of-Pass: Expected cost of producing a correct output." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 139, + 189, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 139, + 189, + 159 + ], + "spans": [ + { + "bbox": [ + 72, + 139, + 189, + 159 + ], + "type": "interline_equation", + "content": "v (m, p) = \\frac {\\mathbb {E} [ \\operatorname {c o s t} _ {m} (p) ]}{\\mathbb {E} [ \\operatorname {a c c u r a c y} _ {m} (p) ]} = \\frac {C _ {m} (p)}{R _ {m} (p)}", + "image_path": "d46c8433e08f0b502f2ad703fc5d518910f28451186239de2344b2aae522e61c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 175, + 188, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 175, + 188, + 203 + ], + "spans": [ + { + "bbox": [ + 72, + 175, + 188, + 203 + ], + "type": "text", + "content": "Human Expert Baseline Cost: Cost of hiring a human expert to produce a correct output." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 206, + 173, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 206, + 173, + 217 + ], + "spans": [ + { + "bbox": [ + 88, + 206, + 173, + 217 + ], + "type": "interline_equation", + "content": "v (\\mathrm {e x p e r t}, p) \\approx C _ {\\mathrm {e x p e r t}} (p)", + "image_path": "0724a272f5ed5babd39fe4257477bdcaf81aca618d51c9fbee49416a8902611d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 235, + 189, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 235, + 189, + 263 + ], + "spans": [ + { + "bbox": [ + 71, + 235, + 189, + 263 + ], + "type": "text", + "content": "Frontier Cost-of-Pass: Lowest cost-of-pass given available set of LMs & human expert baseline." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 89, + 266, + 172, + 278 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 266, + 172, + 278 + ], + "spans": [ + { + "bbox": [ + 89, + 266, + 172, + 278 + ], + "type": "interline_equation", + "content": "\\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right)", + "image_path": "b96bef1b68e8cefc8bc75f2d79919123bd348ee3bb4a32c8bf3c69c8ae65d66d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 282, + 191, + 294 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 282, + 191, + 294 + ], + "spans": [ + { + "bbox": [ + 69, + 282, + 191, + 294 + ], + "type": "interline_equation", + "content": "V _ {p} (\\mathcal {M}) = \\min _ {m \\in \\mathcal {M}} v (m, p) \\longrightarrow \\text {B e s t L M C o s t - o f - P a s s}", + "image_path": "4ca0534a6db9b082749f414aa1909bd73d9de393fc00c36500f0aa490c7c2780.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 214, + 105, + 364, + 180 + ], + "blocks": [ + { + "bbox": [ + 216, + 73, + 362, + 100 + ], + "lines": [ + { + "bbox": [ + 216, + 73, + 362, + 
100 + ], + "spans": [ + { + "bbox": [ + 216, + 73, + 362, + 100 + ], + "type": "text", + "content": "(A) Frontier Cost-of-pass & Human Expert Baseline" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 214, + 105, + 364, + 180 + ], + "lines": [ + { + "bbox": [ + 214, + 105, + 364, + 180 + ], + "spans": [ + { + "bbox": [ + 214, + 105, + 364, + 180 + ], + "type": "image", + "image_path": "8d467f217f9407528afd6c84dbf9877b030a64551f57ce84212c3a7b5172a491.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 213, + 224, + 366, + 305 + ], + "blocks": [ + { + "bbox": [ + 225, + 193, + 354, + 218 + ], + "lines": [ + { + "bbox": [ + 225, + 193, + 354, + 218 + ], + "spans": [ + { + "bbox": [ + 225, + 193, + 354, + 218 + ], + "type": "text", + "content": "(B) Progress as Frontier Cost-of-Pass over Time" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 213, + 224, + 366, + 305 + ], + "lines": [ + { + "bbox": [ + 213, + 224, + 366, + 305 + ], + "spans": [ + { + "bbox": [ + 213, + 224, + 366, + 305 + ], + "type": "image", + "image_path": "869b9e8e33e8a21c6ac492789fe60d695ff8a85c3c275e6387b51ddd84afe964.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 379, + 105, + 534, + 182 + ], + "blocks": [ + { + "bbox": [ + 383, + 73, + 533, + 100 + ], + "lines": [ + { + "bbox": [ + 383, + 73, + 533, + 100 + ], + "spans": [ + { + "bbox": [ + 383, + 73, + 533, + 100 + ], + "type": "text", + "content": "(C) Essentialness of Model Families to Task Categories" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 379, + 105, + 534, + 182 + ], + "lines": [ + { + "bbox": [ + 379, + 105, + 534, + 182 + ], + "spans": [ + { + "bbox": [ + 379, + 105, + 534, + 182 + ], + "type": "image", + 
"image_path": "f1e0c2765cf0b0f42a347df84e77cd52bdd634575ea7b0bab1fce2a5610de61a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 379, + 225, + 534, + 306 + ], + "blocks": [ + { + "bbox": [ + 388, + 193, + 529, + 220 + ], + "lines": [ + { + "bbox": [ + 388, + 193, + 529, + 220 + ], + "spans": [ + { + "bbox": [ + 388, + 193, + 529, + 220 + ], + "type": "text", + "content": "(D) Cost Reductions with Inference Time Techniques" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 379, + 225, + 534, + 306 + ], + "lines": [ + { + "bbox": [ + 379, + 225, + 534, + 306 + ], + "spans": [ + { + "bbox": [ + 379, + 225, + 534, + 306 + ], + "type": "image", + "image_path": "fbd30ce96b94f3798861605fb3b986a8070b9c16bb602ce9ffd2872f3cda7836.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 318, + 543, + 380 + ], + "lines": [ + { + "bbox": [ + 51, + 318, + 543, + 380 + ], + "spans": [ + { + "bbox": [ + 51, + 318, + 543, + 380 + ], + "type": "text", + "content": "Figure 1: Highlights of the cost-of-pass framework and empirical analyses. Core concepts (left) set foundations for: (A) Comparing the Human Expert Baseline to the frontier achieved by the single most effective LM per task category. (B) Tracking the reduction in frontier cost-of-pass over time, indicating progress driven by new model releases (color-coded by family). (C) Quantifying the essential contribution of each model family: lightweight (less than $1 per million tokens), large, and reasoning; to the current cost-efficiency frontier, measured by the percentage of each family's contribution. (D) Assessing the economic benefit (relative cost reduction) achieved by applying common inference-time techniques over the baseline model frontier (which rarely results in meaningful gains)." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 386, + 291, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 386, + 291, + 553 + ], + "spans": [ + { + "bbox": [ + 52, + 386, + 291, + 553 + ], + "type": "text", + "content": "These economic concepts are highly relevant to modern LMs, which inherently function as stochastic producers: for a given input, they yield a desired output (e.g., a correct solution) stochastically (Brown et al., 2024). Common practices such as employing scaffolds or more computationally intensive inference techniques (Snell et al., 2024; Madaan et al., 2023; Wang et al., 2023) represent efforts to manipulate this production process. These strategies seek to increase the probability of success but typically do so at the expense of higher computational cost, directly mirroring the economic trade-offs inherent in production efficiency. Motivated by these parallels and the economic goal of minimizing cost per successful output under uncertainty, we develop a quantitative framework tailored to LMs." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 559, + 230, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 559, + 230, + 571 + ], + "spans": [ + { + "bbox": [ + 52, + 559, + 230, + 571 + ], + "type": "text", + "content": "We summarize our contributions as follows." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 578, + 291, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 578, + 291, + 696 + ], + "spans": [ + { + "bbox": [ + 51, + 578, + 291, + 696 + ], + "type": "text", + "content": "Concepts. We introduce cost-of-pass (§2.2), which quantifies the expected monetary cost to achieve a successful output for a given problem. 
Building on this concept and incorporating a human-expert cost baseline, we define the frontier cost-of-pass as the minimum achievable cost-of-pass across all available options (LMs and human-expert) for that problem. We show these reveal distinct economic niches for model families (e.g., lightweight vs. reasoning models) on different tasks, which accuracy comparisons alone obscure (§3.2)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 52, + 703, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 703, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 703, + 290, + 715 + ], + "type": "text", + "content": "Tracking progress with frontier cost-of-pass. Using the" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 303, + 386, + 542, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 386, + 542, + 494 + ], + "spans": [ + { + "bbox": [ + 303, + 386, + 542, + 494 + ], + "type": "text", + "content": "cost-of-pass and frontier cost-of-pass, we analyze economic improvements across three task categories from May 2024 to February 2025. We observe an exponential decrease in frontier cost-of-pass across all tasks, though the trends vary. Notably, we observe that, over the past year, the expected cost of generating a correct solution to complex quantitative problems has been cut in half every few months. We find that the frontier cost-of-pass is driven primarily by lightweight models and reasoning models (§3.3)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 303, + 499, + 543, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 499, + 543, + 620 + ], + "spans": [ + { + "bbox": [ + 303, + 499, + 543, + 620 + ], + "type": "text", + "content": "Counterfactual frontier in the absence of model families. We show that our analysis reveals the complementary roles of different model types in driving recent progress. 
Innovations in lightweight models have been instrumental in reducing costs on basic quantitative tasks. Large models, by contrast, have been most impactful for knowledge-based benchmarks like GPQA Diamond (Rein et al., 2024). Meanwhile, reasoning models have been central to advances on complex quantitative reasoning challenges such as AIME (MAA, 2024) and MATH (Hendrycks et al., 2021) (" + }, + { + "bbox": [ + 303, + 499, + 543, + 620 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 303, + 499, + 543, + 620 + ], + "type": "text", + "content": " 3.4)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 303, + 625, + 544, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 625, + 544, + 709 + ], + "spans": [ + { + "bbox": [ + 303, + 625, + 544, + 709 + ], + "type": "text", + "content": "Impact of post-hoc inference time techniques. We observe that common test-time techniques such as self-refinement (Madaan et al., 2023) and majority voting (self-consistency; Wang et al., 2022) to improve performance offer either limited or no economic benefits, indicating that the recent reductions in frontier cost-of-pass have been mostly driven by model-level innovations (§ 3.5)." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 98, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 98, + 80 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 98, + 80 + ], + "type": "text", + "content": "2. Setup" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 254, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 254, + 99 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 254, + 99 + ], + "type": "text", + "content": "2.1. Economic Theory of Production Efficiency" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "spans": [ + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": "Classical production theory examines how producers convert inputs into outputs efficiently. 
Given a set of producers " + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{F} = \\{f_0, \\dots, f_{n-1}\\}" + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": ", we are often interested in the maximum output attainable for a given combination of inputs. If producing " + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "inline_equation", + "content": "u \\in \\mathbb{R}_{>0}" + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": " units of output requires an input vector " + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}_{\\geq 0}^k" + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": " (e.g., quantities of different resources), the input requirement set " + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "inline_equation", + "content": "P_u" + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": " contains all input vectors capable of producing at least " + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 52, + 106, + 291, + 201 + ], + "type": "text", + "content": " units:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 210, + 291, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 210, + 291, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 210, + 291, + 228 + ], + "type": "interline_equation", + "content": "P _ {u} = \\left\\{\\mathbf {x} \\mid \\max _ {f _ {i} \\in \\mathcal {F}} f _ {i} (\\mathbf {x}) \\geq u \\right\\}. 
\\tag {1}", + "image_path": "6a7b13390796c6478613ca64b9cf10c4a3baf8f8a05b9b4e501d16514ff5c7b5.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "spans": [ + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "text", + "content": "Based on this input requirement and a vector " + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "inline_equation", + "content": "\\mathbf{w_i} \\in \\mathbb{R}_{\\geq 0}^k" + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "text", + "content": " being the prices of the inputs (incurred by each producer " + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "text", + "content": "), the frontier cost for producing " + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 52, + 238, + 291, + 286 + ], + "type": "text", + "content": " units of output is the minimum cost required:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 294, + 291, + 315 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 294, + 291, + 315 + ], + "spans": [ + { + "bbox": [ + 123, + 294, + 291, + 315 + ], + "type": "interline_equation", + "content": "V _ {u} = \\min _ {\\mathbf {x} \\in P _ {u}, f _ {i} \\in \\mathcal {F}} \\mathbf {w} _ {\\mathbf {i}} ^ {T} \\mathbf {x}, \\tag {2}", + "image_path": "0ab620610ec8223db6b8994ced376a3e4057ef19e3c156b08c2e461f4d2866e2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "spans": [ + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": "subject to " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + 
"type": "inline_equation", + "content": "f_{i}(\\mathbf{x}) \\geq u" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": " implicitly included in " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in P_u" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": ". This " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "inline_equation", + "content": "V_{u}" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": " quantifies the lowest possible cost to achieve output " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": " given the available production technologies " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "inline_equation", + "content": "(\\mathcal{F})" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": " and input prices " + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "inline_equation", + "content": "(\\mathbf{w_i})" + }, + { + "bbox": [ + 52, + 322, + 290, + 418 + ], + "type": "text", + "content": ". Farrell (1957) used these core concepts to build definitions for technical and price efficiency in a production ecosystem for producers. Critically, Aigner et al. (1977) extended this framework to handle stochastic production functions, where output is probabilistic for a given input." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 425, + 291, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 425, + 291, + 533 + ], + "spans": [ + { + "bbox": [ + 52, + 425, + 291, + 533 + ], + "type": "text", + "content": "Building on this economic foundation, we adapt the core concept of a frontier cost " + }, + { + "bbox": [ + 52, + 425, + 291, + 533 + ], + "type": "inline_equation", + "content": "(V_{u})" + }, + { + "bbox": [ + 52, + 425, + 291, + 533 + ], + "type": "text", + "content": " to represent the minimum achievable cost for obtaining a correct solution using LMs. Recognizing that a key aspect of LM behavior is its inherent stochasticity, an issue long addressed in economic production theory (Aigner et al., 1977), we incorporate this variability into our cost-efficiency metric. This enables us to align our framework with core production concepts and assess the economic impact of stochastic LM producers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 544, + 259, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 544, + 259, + 558 + ], + "spans": [ + { + "bbox": [ + 52, + 544, + 259, + 558 + ], + "type": "text", + "content": "2.2. Cost-of-Pass: An Efficiency Metric for LMs" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "spans": [ + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "text", + "content": "Here we instantiate the economic framework for language models (LMs). Consider a specific problem " + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "text", + "content": ", where the unit of production is a correct solution. 
We define a model " + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "text", + "content": " as an inference pipeline using an LM, acting as a stochastic producer. Two quantities characterize its efficiency on problem " + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 563, + 291, + 637 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "spans": [ + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "inline_equation", + "content": "R_{m}(p) = \\mathrm{Prob.}" + }, + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "text", + "content": " producing a correct answer on " + }, + { + "bbox": [ + 52, + 645, + 274, + 658 + ], + "type": "inline_equation", + "content": "p" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 660, + 298, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 660, + 298, + 673 + ], + "spans": [ + { + "bbox": [ + 52, + 660, + 298, + 673 + ], + "type": "inline_equation", + "content": "C_m(p) = \\text{Expected cost of one inference attempt by } m \\text{ on } p" + }, + { + "bbox": [ + 52, + 660, + 298, + 673 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "text", + "content": "In the context of LMs, the inputs " + }, + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "text", + "content": " correspond to resources like prompt and generated tokens, while the input prices " + }, + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 52, + 681, + 291, + 718 + ], + "type": "text", + "content": " represent the costs per token charged by the provider. The" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "content": "total cost of these inputs for a single inference attempt by model " + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "content": " on problem " + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "content": " is captured by " + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "inline_equation", + "content": "C_m(p)" + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "content": ", effectively instantiating the term " + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^T\\mathbf{x}" + }, + { + "bbox": [ + 304, + 67, + 542, + 114 + ], + "type": "text", + "content": " 
from the theory in the previous section." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "text", + "content": "Since the model output is stochastic, the expected number of attempts to obtain the first correct solution is " + }, + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "inline_equation", + "content": "1 / R_{m}(p)" + }, + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "text", + "content": ", assuming independent trials. This yields the cost-of-pass, defined as the expected monetary cost to obtain one correct solution for problem " + }, + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 121, + 543, + 182 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 380, + 192, + 542, + 219 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 192, + 542, + 219 + ], + "spans": [ + { + "bbox": [ + 380, + 192, + 542, + 219 + ], + "type": "interline_equation", + "content": "v (m, p) = \\frac {C _ {m} (p)}{R _ {m} (p)}. 
\\tag {3}", + "image_path": "3499681d1dd7b5d2dc62410a01e17888d372d6c157900ebd5c05d64966620b55.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "spans": [ + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "text", + "content": "The cost-of-pass integrates both performance " + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "inline_equation", + "content": "(R_{m}(p))" + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "text", + "content": " and cost " + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "inline_equation", + "content": "(C_m(p))" + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "text", + "content": " into a single economically interpretable metric: it quantifies how efficiently financial resources are converted into correct outputs. This formulation mirrors classical production theory, where the goal is to assess the cost of achieving a specific target output (Farrell, 1957); in our case, the target is a correct solution. When a model cannot produce one " + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "inline_equation", + "content": "(R_{m}(p) = 0)" + }, + { + "bbox": [ + 303, + 229, + 544, + 338 + ], + "type": "text", + "content": ", the cost-of-pass becomes infinite, appropriately signaling infeasibility." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 350, + 455, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 455, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 455, + 361 + ], + "type": "text", + "content": "2.3. 
The LM Frontier Cost-of-Pass" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "spans": [ + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": "While cost-of-pass (§ 2.2) evaluates a single model's efficiency, understanding the overall state of LM capabilities for a given problem requires assessing the collective performance of the entire available LM ecosystem. Therefore, analogous to the frontier cost " + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "inline_equation", + "content": "V_{u}" + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": " (Eq. 2), we define the " + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "inline_equation", + "content": "LM" + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": " frontier cost-of-pass for problem " + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": " as the minimum cost-of-pass achievable using any available LM strategy " + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": " from the set " + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 303, + 369, + 544, + 464 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 371, + 476, + 542, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 476, + 542, + 495 + ], + "spans": [ + { + "bbox": [ + 371, + 476, + 542, + 495 + ], + "type": "interline_equation", + "content": "V _ {p} (\\mathcal {M}) = \\min _ {m \\in 
\\mathcal {M}} v (m, p). \\tag {4}", + "image_path": "47be0a10772b39136d58f63b010cf469741ce68516e4b1f263aca909dff5a51e.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M})" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": " quantifies the minimum expected cost to solve problem " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": " using the most cost-effective model currently available within the set " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": ". 
If no LM in " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": " can solve " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "R_{m}(p) = 0" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "m\\in \\mathcal{M}" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": "), then " + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M}) = \\infty" + }, + { + "bbox": [ + 304, + 506, + 543, + 556 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 567, + 538, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 538, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 538, + 590 + ], + "type": "text", + "content": "2.4. 
Grounding Evaluation: Estimated Human-Expert Baseline" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "content": "The LM frontier cost-of-pass " + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M})" + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "content": " reveals the best LM performance but lacks context: it does not show if LMs are economically advantageous over human labor. Moreover, the LM frontier cost-of-pass can be infinite if no LM succeeds. To address both, we introduce human-expert baseline as a reference point, by considering a human-expert annotator as a specific strategy: " + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "inline_equation", + "content": "m_{\\mathrm{expert}}" + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_0 = \\{m_{\\mathrm{expert}}\\}" + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "content": " represent this baseline set. We assume experts typically achieve near-perfect correctness " + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "inline_equation", + "content": "(R_{\\mathrm{expert}}(p) \\approx 1)" + }, + { + "bbox": [ + 303, + 597, + 543, + 718 + ], + "type": "text", + "content": " for tasks they are qualified for. 
Thus, the cost-of-pass for a qualified" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 267, + 81 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 267, + 81 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 267, + 81 + ], + "type": "text", + "content": "expert is approximately their labor cost per problem:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 90, + 291, + 105 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 90, + 291, + 105 + ], + "spans": [ + { + "bbox": [ + 119, + 90, + 291, + 105 + ], + "type": "interline_equation", + "content": "v (\\text {e x p e r t}, p) \\approx C _ {\\text {e x p e r t}} (p). 
\\tag {5}", + "image_path": "9d53b8cd44a84eba0df1eeb76dcbe7ecc97671c12f98e669fc6a7ce56b38edfa.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "spans": [ + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "content": "The estimation of " + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{expert}}(p)" + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "content": " involves considering required expertise, time per problem, and appropriate compensation rates (detailed in § 2.6.1). By incorporating this baseline, we define the frontier cost-of-pass for problem " + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "content": ", considering both LMs " + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "inline_equation", + "content": "(\\mathcal{M})" + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "content": " and the human-expert alternative " + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "inline_equation", + "content": "(\\mathcal{M}_0)" + }, + { + "bbox": [ + 52, + 113, + 291, + 174 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 183, + 291, + 198 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 183, + 291, + 198 + ], + "spans": [ + { + "bbox": [ + 78, + 183, + 291, + 198 + ], + "type": "interline_equation", + "content": "V _ {p} (\\mathcal {M} \\cup \\mathcal {M} _ {0}) = \\min \\left(V _ {p} (\\mathcal {M}), v (\\text {e x p e r t}, p)\\right). 
\\tag {6}", + "image_path": "662c2beaad31b19c239610a14b9523bf0e990241e88f3fcbe90b760c709f7e3c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "spans": [ + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "text", + "content": "This frontier cost-of-pass represents the true minimum expected cost to obtain a correct solution for problem " + }, + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "text", + "content": " using the best available option, whether it's an LM or a human. Crucially, " + }, + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0})" + }, + { + "bbox": [ + 52, + 207, + 292, + 268 + ], + "type": "text", + "content": " is always finite (assuming finite human-expert cost and capability)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 280, + 228, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 280, + 228, + 293 + ], + "spans": [ + { + "bbox": [ + 52, + 280, + 228, + 293 + ], + "type": "text", + "content": "2.5. 
Measuring Progress and Value Gain" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "spans": [ + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": "To track improvements against the best available option over time, let " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": " denote the total set of available strategies at time " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": ", encompassing both the set of LM strategies released up to time " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": " and the human-expert baseline " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_0" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": ", that is, " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t = \\{m_{\\leq t}\\} \\cup \\mathcal{M}_0" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": ". 
The frontier cost-of-pass achievable at time " + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 298, + 291, + 370 + ], + "type": "text", + "content": " can be calculated as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 380, + 291, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 380, + 291, + 399 + ], + "spans": [ + { + "bbox": [ + 116, + 380, + 291, + 399 + ], + "type": "interline_equation", + "content": "V _ {p} \\left(\\mathcal {M} _ {t}\\right) = \\min _ {m \\in \\mathcal {M} _ {t}} v (m, p). \\tag {7}", + "image_path": "d6ab12161c3f30622480c922bd2826ffd09e7b876cd0d4635e04bbc52454d303.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "spans": [ + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": "As new LM models " + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "inline_equation", + "content": "\\{m_t\\}" + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": " are released, the set expands such that " + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t = \\mathcal{M}_{t - 1} \\cup \\{m_t\\}" + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": ". 
Consequently, the frontier cost-of-pass " + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M}_{t})" + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": " forms a non-increasing sequence over time " + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": ", tracking the reduction in the minimum cost needed to solve a particular problem " + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 409, + 291, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "spans": [ + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "text", + "content": "To quantify the economic impact of new developments, we define the gain. 
When a new set of models " + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "inline_equation", + "content": "\\{m_t\\}" + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "text", + "content": " becomes available at time " + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "text", + "content": " (often a single model), the gain for problem " + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 475, + 291, + 523 + ], + "type": "text", + "content": " is the reduction it causes in the frontier cost-of-pass:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 58, + 533, + 290, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 533, + 290, + 558 + ], + "spans": [ + { + "bbox": [ + 58, + 533, + 290, + 558 + ], + "type": "interline_equation", + "content": "G _ {p} \\left(\\left\\{m _ {t} \\right\\}, \\mathcal {M} _ {t - 1}\\right) = V _ {p} \\left(\\mathcal {M} _ {t - 1}\\right) - V _ {p} \\left(\\mathcal {M} _ {t - 1} \\cup \\left\\{m _ {t} \\right\\}\\right). 
\\tag {8}", + "image_path": "7669a7a3ba5af13d09f43461712be4673c3d790558add3473d751fed207038d0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "spans": [ + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "inline_equation", + "content": "G_{p}" + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": " measures how much cheaper the new model(s), " + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "inline_equation", + "content": "\\{m_t\\}" + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": ", make solving " + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": " compared to prior best options, including humans. Hence, a large " + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "inline_equation", + "content": "G_{p}" + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": " value indicates a significant economic contribution in solving " + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 52, + 567, + 291, + 652 + ], + "type": "text", + "content": ". This notion underlies our experiments, analyzing the value generated by models relative to the human baseline and tracking the evolution of the overall frontier." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "text", + "content": "Extending to a distribution. 
Although measuring frontier cost-of-pass and value gain for individual problems can be informative, particularly through a fine-grained perspective, we often care about more than a single instance. Let " + }, + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "inline_equation", + "content": "P \\sim D" + }, + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "text", + "content": " be a set of problems sampled from a problem distribution " + }, + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 52, + 658, + 292, + 718 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 91 + ], + "type": "text", + "content": "We can then extend our definitions for such a distribution through the following:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 358, + 112, + 542, + 126 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 112, + 542, + 126 + ], + "spans": [ + { + "bbox": [ + 358, + 112, + 542, + 126 + ], + "type": "interline_equation", + "content": "V _ {p \\sim D} (\\mathcal {M} _ {t}) = \\mathbb {E} _ {p \\sim D} [ V _ {p} (\\mathcal {M} _ {t}) ], \\tag {9}", + "image_path": "1d8be8a5cde6ec3465198b80b6ba7628f0969f0f90b7df215617d85fa21b7dab.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 128, + 542, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 128, + 542, + 141 + ], + "spans": [ + { + "bbox": [ + 312, + 128, + 542, + 141 + ], + "type": "interline_equation", + "content": "G _ {p \\sim D} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) = \\mathbb {E} _ {p \\sim D} [ G _ {p} (\\{m _ {t} \\}, \\mathcal {M} _ {t - 1}) ]. 
\\tag {10}", + "image_path": "c99d5a6c57a920288412adc207ba75223db787139378e8b8e109f1e5f1a56eea.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 167, + 477, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 167, + 477, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 477, + 179 + ], + "type": "text", + "content": "2.6. Estimating the Economic Efficiency" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 186, + 543, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 186, + 543, + 211 + ], + "spans": [ + { + "bbox": [ + 304, + 186, + 543, + 211 + ], + "type": "text", + "content": "To operationalize our overall framework for any given distribution of problems, we introduce the following recipe:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 216, + 544, + 479 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "text", + "content": "(1) Estimate success rates. For each model-problem pair " + }, + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "inline_equation", + "content": "(m,p)" + }, + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "text", + "content": ", generate a number of independent attempts to approximate " + }, + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "inline_equation", + "content": "R_{m}(p)" + }, + { + "bbox": [ + 304, + 216, + 544, + 276 + ], + "type": "text", + "content": ". We use the same prompt and model settings across these attempts, varying only factors necessary to ensure independence (e.g., internal sampling randomness)." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 281, + 544, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 544, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 544, + 354 + ], + "type": "text", + "content": "(2) Estimate per-attempt cost. Track the average number of tokens (prompt + generation) consumed per attempt, multiply by the current token price (which can differ by model provider or usage level), and add any extra charges (e.g., third-party API calls, external reasoning modules, etc.). This sum yields " + }, + { + "bbox": [ + 304, + 281, + 544, + 354 + ], + "type": "inline_equation", + "content": "C_m(p)" + }, + { + "bbox": [ + 304, + 281, + 544, + 354 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "content": "(3) Compute cost-of-pass. For each model " + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "content": ", calculate " + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "inline_equation", + "content": "v(m, p) = C_m(p) / R_m(p)" + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "content": ". 
(" + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "inline_equation", + "content": "R_m(p) = 0" + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "content": " yields " + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "inline_equation", + "content": "v(m, p) = \\infty" + }, + { + "bbox": [ + 304, + 358, + 542, + 396 + ], + "type": "text", + "content": ".)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "text", + "content": "(4) Determine frontier cost-of-pass. Estimate human-expert cost " + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "inline_equation", + "content": "v(\\text{expert}, p)" + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "text", + "content": " (see below). Find " + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M} \\cup \\mathcal{M}_{0})" + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "text", + "content": " for a given set of strategies " + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 401, + 543, + 437 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": "(5) Analyze over benchmarks. 
Aggregate " + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "inline_equation", + "content": "V_{p}(\\mathcal{M})" + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": " across problems " + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "inline_equation", + "content": "p \\sim D" + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": " to get " + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "inline_equation", + "content": "V_{p \\sim D}(\\mathcal{M}_t)" + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": ". Track progress over time (for " + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t" + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": ") and compute gain " + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "inline_equation", + "content": "G_{p \\sim D}" + }, + { + "bbox": [ + 304, + 443, + 542, + 479 + ], + "type": "text", + "content": " for new models." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 491, + 468, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 468, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 468, + 502 + ], + "type": "text", + "content": "2.6.1. 
Estimating Human-Expert Cost" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "spans": [ + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "text", + "content": "To estimate " + }, + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "inline_equation", + "content": "v(\\text{expert}, p)" + }, + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "text", + "content": ", the plausible cost of obtaining a correct human-expert answer, we systematically determine the required qualifications, appropriate hourly compensation, and average time for a typical problem " + }, + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 303, + 509, + 544, + 689 + ], + "type": "text", + "content": " per dataset. We determine these quantities based on a hierarchy of evidence by prioritizing the dataset's creation process or associated studies (e.g., reported annotation pay/time (Parrish et al., 2022)). When direct data is absent, we leverage findings from closely related work (Zhang et al., 2024) or infer parameters from the dataset's context (e.g., deriving time-per-problem from contest rules (Art of Problem Solving, 2023)). 
Compensation rates are informed by reported study payments (Rein, 2024) or relevant market rates for comparable expertise (e.g., specialized tutoring rates (TutorCruncher, 2025; Wyzant Tutoring, 2025)).1" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 304, + 696, + 543, + 718 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 696, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 304, + 696, + 543, + 718 + ], + "type": "text", + "content": "1The full derivation, justification, and sources for our approach are detailed in Appendix A. The resulting estimates are in Table 3." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 64, + 491, + 253 + ], + "blocks": [ + { + "bbox": [ + 109, + 64, + 491, + 253 + ], + "lines": [ + { + "bbox": [ + 109, + 64, + 491, + 253 + ], + "spans": [ + { + "bbox": [ + 109, + 64, + 491, + 253 + ], + "type": "table", + "html": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.8e-50.192.7e-218.583.3815.33
GPT-4o mini5.4e-50.221.3e-225.382.0614.67
Llama-3.3-70B1.6e-40.167.4e-318.581.3110.67
Large Models
Llama-3.1-405B6.9e-40.146.7e-310.431.138.67
Claude Sonnet-3.52.1e-30.196.4e-314.062.5414.67
GPT-4o2.3e-30.176.2e-314.070.9614.01
Reasoning Models
OpenAI o1-mini5.4e-30.171.3e-212.270.504.80
OpenAI o11.9e-20.224.3e-28.070.902.85
DeepSeek-R11.8e-30.171.5e-214.570.213.41
OpenAI o3-mini1.1e-30.111.1e-28.180.762.03
", + "image_path": "522c2601cc01bbc0a0ebbd3ad816b7d64f965dcbdf9ec52f20e4ef99b0dd25fb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "lines": [ + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "spans": [ + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "text", + "content": "Table 1: Frontier dollar cost-of-pass per model / dataset. Each entry is the expected dollar cost of a problem " + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "inline_equation", + "content": "p \\sim D" + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "text", + "content": " with the presence of the model " + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "text", + "content": " and a human expert: " + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "inline_equation", + "content": "V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0)" + }, + { + "bbox": [ + 51, + 259, + 544, + 292 + ], + "type": "text", + "content": ". Per column, the 3 entries with the lowest value (i.e. best frontier cost-of-pass) have blue highlights. Different model families emerge as cost-effective at different task categories, highlighting the strengths of our evaluation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 297, + 134, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 297, + 134, + 310 + ], + "spans": [ + { + "bbox": [ + 52, + 297, + 134, + 310 + ], + "type": "text", + "content": "3. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 317, + 162, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 317, + 162, + 328 + ], + "spans": [ + { + "bbox": [ + 52, + 317, + 162, + 328 + ], + "type": "text", + "content": "3.1. 
Models and Datasets" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 335, + 253, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 335, + 253, + 348 + ], + "spans": [ + { + "bbox": [ + 52, + 335, + 253, + 348 + ], + "type": "text", + "content": "Models. We consider three categories of models:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 354, + 291, + 510 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 52, + 354, + 291, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 354, + 291, + 414 + ], + "spans": [ + { + "bbox": [ + 52, + 354, + 291, + 414 + ], + "type": "text", + "content": "(1) Lightweight models: We use the per-token cost as a proxy and select models with a cost less than $1 per million input and output tokens (see Table 4): Llama-3.1-8B (Grattafori et al., 2024), GPT-4o mini (OpenAI, 2024), and Llama-3.3-70B (Meta-AI, 2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 419, + 291, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 419, + 291, + 456 + ], + "spans": [ + { + "bbox": [ + 52, + 419, + 291, + 456 + ], + "type": "text", + "content": "(2) Large models: We select large general-purpose LMs: Llama-3.1-405B (Grattafiori et al., 2024), Claude Sonnet-3.5 (Anthropic, 2024), and GPT-4o (Hurst et al., 2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 461, + 291, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 461, + 291, + 510 + ], + "spans": [ + { + "bbox": [ + 52, + 461, + 291, + 510 + ], + "type": "text", + "content": "(3) Reasoning models: We select models with special reasoning post-training, including OpenAI's o1-mini (OpenAI et al., 2024), o1 (OpenAI et al., 2024), and o3-mini (OpenAI, 2025), as well as DeepSeek R1 (Guo et al., 2025)." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 515, + 290, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 515, + 290, + 575 + ], + "spans": [ + { + "bbox": [ + 52, + 515, + 290, + 575 + ], + "type": "text", + "content": "Within each category, we select three to four representative models released between the second half of 2024 and early 2025. To preserve the integrity of our temporal analysis, we prioritize the earliest stable releases and exclude research previews or experimental versions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 581, + 280, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 581, + 280, + 593 + ], + "spans": [ + { + "bbox": [ + 52, + 581, + 280, + 593 + ], + "type": "text", + "content": "Datasets. We evaluate models across three sets of tasks:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 599, + 291, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 52, + 599, + 291, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 599, + 291, + 658 + ], + "spans": [ + { + "bbox": [ + 52, + 599, + 291, + 658 + ], + "type": "text", + "content": "(1) Basic quantitative tasks: These involve basic numerical reasoning. We include an arithmetic dataset (Two Digit Addition) to assess basic numerical computation, and GSM8K (Cobbe et al., 2021) to evaluate multi-step grade-school level problem solving." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 664, + 291, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 664, + 291, + 712 + ], + "spans": [ + { + "bbox": [ + 52, + 664, + 291, + 712 + ], + "type": "text", + "content": "(2) Knowledge-based tasks: These require recalling and reasoning over factual knowledge. 
We include a scientific knowledge-intensive question answering task (GPQA-Diamond (Rein et al., 2024)) to evaluate models' abl" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 297, + 543, + 435 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 304, + 297, + 542, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 542, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 542, + 346 + ], + "type": "text", + "content": "ity to recall and utilize complex scientific facts, and a bias benchmark (BBQ (Parrish et al., 2022)) to evaluate whether models rely on stereotypical knowledge or can disambiguate factual responses from biased defaults." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 351, + 543, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 351, + 543, + 435 + ], + "spans": [ + { + "bbox": [ + 304, + 351, + 543, + 435 + ], + "type": "text", + "content": "(3) Complex quantitative reasoning tasks: These require complex mathematical reasoning and problem solving. We use MATH-500 (Hendrycks et al., 2021; Lightman et al., 2023) to assess models on competition-level maths problems, and AIME24 (MAA, 2024) to evaluate performance on challenging problems from the 2024 American Invitational Mathematics Examination." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 449, + 504, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 504, + 460 + ], + "type": "text", + "content": "3.2. 
Frontier Cost-of-Pass with a Single Model" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "spans": [ + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "content": "In this experiment, we aim to quantify the economic value each model " + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "content": " generates on different distributions of problems " + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "inline_equation", + "content": "p \\sim D" + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "content": ". For this, we take human-expert as a baseline and quantify the frontier cost-of-pass of a problem in the presence of the model " + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "inline_equation", + "content": "V_{p \\sim D}(\\{m\\} \\cup \\mathcal{M}_0)" + }, + { + "bbox": [ + 303, + 468, + 543, + 529 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 303, + 533, + 544, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 533, + 544, + 713 + ], + "spans": [ + { + "bbox": [ + 303, + 533, + 544, + 713 + ], + "type": "text", + "content": "The results in Table 1, highlighting the top three costs, show that our frontier cost-of-pass effectively captures how different model families offer economic advantages across various task categories. We find that lightweight models yield the lowest frontier cost-of-pass on basic quantitative tasks, such as Two Digit Addition. 
This is expected, as all model families achieve high accuracy on this dataset, making the least expensive models the most cost-effective. In contrast, for knowledge-based tasks, larger models achieve a lower frontier cost-of-pass compared to lightweight ones. While the reasoning models, such as o1, are priced significantly more expensively compared to both large and lightweight models, they lead to significant performance improvements, which, overall, result in reductions in the cost-of-pass mainly in complex quantitative tasks." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 82, + 213, + 194 + ], + "blocks": [ + { + "bbox": [ + 91, + 70, + 174, + 82 + ], + "lines": [ + { + "bbox": [ + 91, + 70, + 174, + 82 + ], + "spans": [ + { + "bbox": [ + 91, + 70, + 174, + 82 + ], + "type": "text", + "content": "Two Digit Addition" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 82, + 213, + 194 + ], + "lines": [ + { + "bbox": [ + 59, + 82, + 213, + 194 + ], + "spans": [ + { + "bbox": [ + 59, + 82, + 213, + 194 + ], + "type": "image", + "image_path": "50036048d55bd58bd00e762774ed0bdd190e68504cb77bf06c5f2d7a0774f944.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 227, + 82, + 380, + 193 + ], + "blocks": [ + { + "bbox": [ + 288, + 70, + 312, + 81 + ], + "lines": [ + { + "bbox": [ + 288, + 70, + 312, + 81 + ], + "spans": [ + { + "bbox": [ + 288, + 70, + 312, + 81 + ], + "type": "text", + "content": "BBQ" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 227, + 82, + 380, + 193 + ], + "lines": [ + { + "bbox": [ + 227, + 82, + 380, + 193 + ], + "spans": [ + { + "bbox": [ + 227, + 82, + 380, + 193 + ], + "type": "image", + "image_path": "13635eb971e0c3632f92759d60ba47cf2eea72475e047a2d710ad29a2d1c717f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 394, + 82, + 535, + 193 + ], + "blocks": [ + { + "bbox": [ + 445, + 70, + 489, + 81 + ], + "lines": [ + { + "bbox": [ + 445, + 70, + 489, + 81 + ], + "spans": [ + { + "bbox": [ + 445, + 70, + 489, + 81 + ], + "type": "text", + "content": "MATH500" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 394, + 82, + 535, + 193 + ], + "lines": [ + { + "bbox": [ + 394, + 82, + 535, + 193 + ], + "spans": [ + { + "bbox": [ + 394, + 82, + 535, + 193 + ], + "type": "image", + "image_path": "482987eb0bc339f8ccfedce3697ce3ff1811641a2ba6b8966508ad35ca724c8a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 60, + 215, + 202, + 327 + ], + "blocks": [ + { + "bbox": [ + 115, + 204, + 150, + 214 + ], + "lines": [ + { + "bbox": [ + 115, + 204, + 150, + 214 + ], + "spans": [ + { + "bbox": [ + 115, + 204, + 150, + 214 + ], + "type": "text", + "content": "GSM8K" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 215, + 202, + 327 + ], + "lines": [ + { + "bbox": [ + 60, + 215, + 202, + 327 + ], + "spans": [ + { + "bbox": [ + 
60, + 215, + 202, + 327 + ], + "type": "image", + "image_path": "326a80519a5e711cd95c953ca4e4bfdf62a1180de18fd344f54905a9190e9426.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "lines": [ + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "type": "text", + "content": "Figure 2: The frontier dollar cost-of-pass (i.e. " + }, + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "type": "inline_equation", + "content": "V_{p\\sim D}(\\mathcal{M}_t)" + }, + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "type": "text", + "content": " steadily decreases with new model releases, spanning models released between May 2024 and February 2025. Y-axes are normalized (divided by " + }, + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "type": "inline_equation", + "content": "V_{p\\sim D}(\\mathcal{M}_0)" + }, + { + "bbox": [ + 52, + 335, + 542, + 357 + ], + "type": "text", + "content": ", shown in percentage (%))." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 225, + 217, + 380, + 327 + ], + "blocks": [ + { + "bbox": [ + 264, + 205, + 336, + 215 + ], + "lines": [ + { + "bbox": [ + 264, + 205, + 336, + 215 + ], + "spans": [ + { + "bbox": [ + 264, + 205, + 336, + 215 + ], + "type": "text", + "content": "GPQA Diamond" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 225, + 217, + 380, + 327 + ], + "lines": [ + { + "bbox": [ + 225, + 217, + 380, + 327 + ], + "spans": [ + { + "bbox": [ + 225, + 217, + 380, + 327 + ], + "type": "image", + "image_path": "832bfe627fe748fd7b35bea328fab3574c3707e52b56a17298ca44617e7fc88e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 392, + 215, + 537, + 327 + ], + "blocks": [ + { + "bbox": [ + 443, + 205, + 492, + 215 + ], + "lines": [ + { + "bbox": [ + 443, + 205, + 492, + 215 + ], + "spans": [ + { + "bbox": [ + 443, + 205, + 492, + 215 + ], + "type": "text", + "content": "AIME 2024" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 392, + 215, + 537, + 327 + ], + "lines": [ + { + "bbox": [ + 392, + 215, + 537, + 327 + ], + "spans": [ + { + "bbox": [ + 392, + 215, + 537, + 327 + ], + "type": "image", + "image_path": "3be70cbe724498608dfb9db5e73a4241a5c2f156493a7e6794bc974ffe034290.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "spans": [ + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "type": "text", + "content": "In contrast, when either task performance " + }, + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "type": "inline_equation", + "content": "(R_{m}(p\\sim D))" + }, + { + "bbox": [ 
+ 52, + 366, + 291, + 450 + ], + "type": "text", + "content": " or cost " + }, + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "type": "inline_equation", + "content": "(C_m(p\\sim D)" + }, + { + "bbox": [ + 52, + 366, + 291, + 450 + ], + "type": "text", + "content": " is solely taken into account (Tables 5 and 6) such metrics tend to favor either reasoning models or lightweight models respectively due to their significant edge per criteria, without assessing the nuances in the economic impact they induce. This effectively highlights the sophistication of our metric and evaluation framework." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 463, + 288, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 463, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 52, + 463, + 288, + 475 + ], + "type": "text", + "content": "3.3. Tracking Frontier Cost-of-Pass with New Releases" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "spans": [ + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "text", + "content": "In this experiment, we track the improvements on the frontier cost-of-pass for a problem. Figure 2 shows the trends of the cumulative gain per dataset " + }, + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "inline_equation", + "content": "(V_{p\\sim D}(\\mathcal{M}_t))" + }, + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "text", + "content": ", each updated by the corresponding model release " + }, + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "inline_equation", + "content": "(\\mathcal{M}_{t - 1}\\cup \\{m_t\\})" + }, + { + "bbox": [ + 52, + 482, + 291, + 601 + ], + "type": "text", + "content": ". We observe a steady decline in the frontier cost-of-pass for complex quantitative tasks. 
In contrast, knowledge-based and basic quantitative tasks typically exhibit a sharp initial drop in frontier cost-of-pass with the early releases of models, followed by a plateau. To quantify the cost reduction trends, we empirically fit an exponential decay curve of the form:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 124, + 610, + 290, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 610, + 290, + 625 + ], + "spans": [ + { + "bbox": [ + 124, + 610, + 290, + 625 + ], + "type": "interline_equation", + "content": "V _ {p} \\left(M _ {t}\\right) \\approx a e ^ {- b t} + c, \\tag {11}", + "image_path": "ede810bce160eb35530e054ffba7c60f1bf8fe524c89eebf58828bb29e75b3a5.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": " denotes time in months since the first model release, and " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": " are fit parameters. 
From this, we compute the time for the exponential component of the cost to drop by " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "inline_equation", + "content": "T_{1/2} = \\ln(2)/b" + }, + { + "bbox": [ + 52, + 633, + 291, + 717 + ], + "type": "text", + "content": ". Using this formulation, we find that for complex quantitative tasks, between May 2024 and February 2025, the frontier cost-of-pass for MATH500 halved approximately every 2.6 months, whereas for AIME" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 366, + 544, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 544, + 391 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 544, + 391 + ], + "type": "text", + "content": "2024, the halving time was 7.1 months—indicating consistent cost reductions over the past year." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 403, + 531, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 531, + 427 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 531, + 427 + ], + "type": "text", + "content": "3.4. Essentialness of Model Families: Counterfactual Frontier Cost-of-Pass" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "spans": [ + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "text", + "content": "Section 3.3 showed the frontier cost-of-pass decreasing over time with new model releases. To understand which model families were most critical to this progress, we conduct a counterfactual analysis that quantifies the impact of removing each family. 
Defining " + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_g" + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "text", + "content": " as a family of models (lightweight, large, or reasoning), we measure the counterfactual contribution of family " + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "text", + "content": " on dataset " + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 303, + 434, + 543, + 540 + ], + "type": "text", + "content": " by calculating the relative improvement in frontier cost-of-pass attributable to its inclusion:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 370, + 547, + 542, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 547, + 542, + 574 + ], + "spans": [ + { + "bbox": [ + 370, + 547, + 542, + 574 + ], + "type": "interline_equation", + "content": "\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {g} , \\mathcal {M} _ {T} \\backslash \\mathcal {M} _ {g}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T}\\right)}. \\tag {12}", + "image_path": "656223ea2ac1042dfb13629ea5090a3c50b8b896f40895d187fa7bff7f374bc2.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "spans": [ + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_T" + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "text", + "content": " includes all models used in our experiments. 
This metric represents the relative improvement in the final frontier cost-of-pass " + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "inline_equation", + "content": "V_{p\\sim D}(\\mathcal{M}_T)" + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "text", + "content": " attributable to the model family " + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_g" + }, + { + "bbox": [ + 303, + 580, + 543, + 640 + ], + "type": "text", + "content": ", with higher values indicating greater essentialness of that family for achieving the current frontier." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "content": "Figure 3 illustrates our main findings, revealing distinct roles across model families. Lightweight models help reduce the frontier cost-of-pass on basic quantitative tasks, while large models drive performance on knowledge-intensive tasks. 
Reasoning models play a key role in advancing the frontier for complex quantitative reasoning and also improve" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 67, + 486, + 194 + ], + "blocks": [ + { + "bbox": [ + 111, + 67, + 486, + 194 + ], + "lines": [ + { + "bbox": [ + 111, + 67, + 486, + 194 + ], + "spans": [ + { + "bbox": [ + 111, + 67, + 486, + 194 + ], + "type": "image", + "image_path": "9d65079fdb587bed54bc16355915a89bba1090a93865eeaf66281756361e237d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "lines": [ + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "text", + "content": "Figure 3: The relative improvement " + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "text", + "content": " in frontier cost-of-pass attributable to each model family " + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "text", + "content": ", calculated under a counterfactual 
setting where " + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_g" + }, + { + "bbox": [ + 52, + 209, + 541, + 232 + ], + "type": "text", + "content": " is removed. Higher values signify greater essentialness for maintaining the current frontier." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 73, + 240, + 523, + 309 + ], + "blocks": [ + { + "bbox": [ + 73, + 240, + 523, + 309 + ], + "lines": [ + { + "bbox": [ + 73, + 240, + 523, + 309 + ], + "spans": [ + { + "bbox": [ + 73, + 240, + 523, + 309 + ], + "type": "table", + "html": "
Inference Time TechniqueBasic QuantitativeKnowledge BasedComplex Quantitative
Two Digit AdditionGSM8KBBQGPQA DiamondMATH500AIME24
Self-Refine006.724.900
Maj. Vote (k=3)000000
Maj. Vote (k=4)000000
", + "image_path": "d5d1dd0f4fce7f531bc4229a090b9b204042e706c64543dd30f1b2553c279874.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 315, + 474, + 327 + ], + "lines": [ + { + "bbox": [ + 119, + 315, + 474, + 327 + ], + "spans": [ + { + "bbox": [ + 119, + 315, + 474, + 327 + ], + "type": "text", + "content": "Table 2: Relative performance gains (%) from different inference time techniques across datasets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 333, + 290, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 333, + 290, + 357 + ], + "spans": [ + { + "bbox": [ + 52, + 333, + 290, + 357 + ], + "type": "text", + "content": "performance on GPQA-Diamond, as well as GSM8K, which benefits from small reasoning models like o3-mini." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 363, + 291, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 363, + 291, + 471 + ], + "spans": [ + { + "bbox": [ + 52, + 363, + 291, + 471 + ], + "type": "text", + "content": "These findings highlight that progress on different task types is driven by different model paradigms. While large models have brought clear gains on knowledge-intensive tasks (e.g., GPQA), recent improvements in cost-efficiency—especially in more quantitative domains—appear largely driven by advances in lightweight and reasoning models. Together, these results suggest that the current cost-efficiency frontier, as reflected in our framework, is shaped mainly by (i) lightweight models and (ii) reasoning models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 483, + 285, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 483, + 285, + 507 + ], + "spans": [ + { + "bbox": [ + 52, + 483, + 285, + 507 + ], + "type": "text", + "content": "3.5. 
Impact of Inference Time Techniques on Frontier Cost-of-Pass" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 514, + 291, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 514, + 291, + 587 + ], + "spans": [ + { + "bbox": [ + 52, + 514, + 291, + 587 + ], + "type": "text", + "content": "We now assess whether common inference-time techniques provide meaningful economic benefits. Specifically, we ask: is it cost-effective to improve model performance through these techniques, compared to relying on the models' baseline performance? To explore this, we focus on the set of lightweight and large models, denoted by " + }, + { + "bbox": [ + 52, + 514, + 291, + 587 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_L" + }, + { + "bbox": [ + 52, + 514, + 291, + 587 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "spans": [ + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "text", + "content": "First, we determine the frontier cost-of-pass achieved by " + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_L" + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "text", + "content": " without any modifications. We then apply a given inference-time technique uniformly across all models in " + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_L" + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "text", + "content": ", yielding a modified set " + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_L^*" + }, + { + "bbox": [ + 52, + 592, + 291, + 664 + ], + "type": "text", + "content": ". 
The gain from this technique, measured relative to the original frontier cost-of-pass, can be computed as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 672, + 291, + 699 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 672, + 291, + 699 + ], + "spans": [ + { + "bbox": [ + 129, + 672, + 291, + 699 + ], + "type": "interline_equation", + "content": "\\frac {G _ {p \\sim D} \\left(\\mathcal {M} _ {L} ^ {*} , \\mathcal {M} _ {L}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {L}\\right)}. \\tag {13}", + "image_path": "4b8807ce16c96c9c219fb85d28b143bb77eeb299eced28692be856c6c02086e6.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 705, + 292, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 705, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 705, + 292, + 718 + ], + "type": "text", + "content": "In this study, we consider two popular techniques: self-" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 333, + 543, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 543, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 543, + 357 + ], + "type": "text", + "content": "refinement (Madaan et al., 2023) and majority voting (a.k.a. self-consistency; Wang et al., 2023), with 3 and 4 votes." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 363, + 544, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 544, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 544, + 434 + ], + "type": "text", + "content": "As shown in Table 2, self-refinement shows moderate economic benefit on knowledge-intensive tasks, with a notable " + }, + { + "bbox": [ + 304, + 363, + 544, + 434 + ], + "type": "inline_equation", + "content": "24.9\\%" + }, + { + "bbox": [ + 304, + 363, + 544, + 434 + ], + "type": "text", + "content": " improvement on GPQA Diamond. 
In contrast, majority voting—despite potentially enhancing raw accuracy—does not offer relative economic improvement across the tested models and datasets." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 441, + 543, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 441, + 543, + 524 + ], + "spans": [ + { + "bbox": [ + 303, + 441, + 543, + 524 + ], + "type": "text", + "content": "Collectively, these findings suggest, at least for the evaluated techniques, that the increased computational costs generally outweigh the performance benefits relative to the frontier cost-of-pass established by the baseline models. This implies that these common inference-time approaches may not be sufficient on their own to yield significant economic benefits within our evaluation framework for now." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 540, + 397, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 397, + 553 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 397, + 553 + ], + "type": "text", + "content": "4. Related Works" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 560, + 542, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 560, + 542, + 657 + ], + "spans": [ + { + "bbox": [ + 303, + 560, + 542, + 657 + ], + "type": "text", + "content": "Economic perspectives and broader impacts. The efficiency of LMs carries significant economic implications, as they are viewed as general-purpose technologies impacting productivity and labor (Eloundou et al., 2024; Brynjolfsson et al., 2025). Complementary economic analyses explore provider strategies regarding pricing and product design Bergemann et al. (2025), and user-side decision-making involving ROI, token costs, and success probabilities." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 303, + 662, + 543, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 662, + 543, + 711 + ], + "spans": [ + { + "bbox": [ + 303, + 662, + 543, + 711 + ], + "type": "text", + "content": "Our cost-of-pass metric serves as a crucial bridge between these technical realities of model performance and their economic consequences. By providing a fundamental measure, the expected monetary cost to successfully complete" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 117 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 117 + ], + "type": "text", + "content": "a task, it allows for quantifying the economic contribution of specific AI systems and informs rational model selection for achieving economic viability, and provides quantitative perspective on the economic evolution of the LM ecosystem." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 121, + 292, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 121, + 292, + 253 + ], + "spans": [ + { + "bbox": [ + 52, + 121, + 292, + 253 + ], + "type": "text", + "content": "LM resource consumption, efficiency optimization and benchmarking. Research increasingly recognizes the importance of LM resource consumption and efficiency. Studies have quantified operational costs like tokens (Chen et al., 2023) and energy (Maliakel et al., 2025), revealing task-dependent performance and potential diminishing returns from high expenditure (Miserendino et al., 2025). This focus has intensified with the rise of reasoning methodologies (Sui et al., 2025) and inference-time techniques (e.g., Madaan et al. (2023); Wang et al. (2023)), which often trade increased computational cost for potential accuracy gains." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 258, + 291, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 258, + 291, + 368 + ], + "spans": [ + { + "bbox": [ + 52, + 258, + 291, + 368 + ], + "type": "text", + "content": "Concerns like \"overthinking,\" where lengthy processing fails to improve results (Chen et al., 2024; Cuadron et al., 2025), have spurred efforts to optimize resource use through methods like dynamic token budgeting (Han et al., 2025), specialized training (Arora & Zanette, 2025), prompt engineering (Xu et al., 2025; Aytes et al., 2025) or researching optimal reasoning lengths (Wu et al., 2025; Yang et al., 2025). Concurrently, evaluation methodologies have evolved beyond pure accuracy or correctness measures." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 372, + 292, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 372, + 292, + 504 + ], + "spans": [ + { + "bbox": [ + 52, + 372, + 292, + 504 + ], + "type": "text", + "content": "Recognizing its insufficiency, researchers have incorporated cost via fixed budgets (Wang et al., 2024), performance heuristics (McDonald et al., 2024), or non-monetary metrics like conciseness (Nayab et al., 2024). Kapoor et al. (2024) strongly advocated for using real dollar costs and accounting for stochasticity—factors central to our approach. Benchmarking efforts have also highlighted diminishing returns from simply scaling inference computation (Parashar et al., 2025). While these works underscore the need for cost-aware analysis, they often rely on specific constraints (e.g., fixed budgets) or heuristic metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 510, + 292, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 510, + 292, + 571 + ], + "spans": [ + { + "bbox": [ + 52, + 510, + 292, + 571 + ], + "type": "text", + "content": "Our cost-of-pass framework seeks to advance this by providing a single, interpretable metric grounded in economic production principles, offering a unified way to assess the economic viability of different models and techniques without predefined budget assumptions or proxy metrics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 585, + 126, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 585, + 126, + 597 + ], + "spans": [ + { + "bbox": [ + 52, + 585, + 126, + 597 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 605, + 292, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 605, + 292, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 605, + 292, + 715 + ], + "type": "text", + "content": "We introduced an economic framework designed to evaluate language models by integrating their performance with inference cost. Drawing from production theory, we conceptualize language models as stochastic producers, and assess their efficiency using our proposed cost-of-pass metric, which measures the expected cost per correct solution. Our analysis utilizes this metric alongside the frontier cost-of-pass, defined as the minimum achievable cost compared to an human expert baseline. This approach reveals distinct" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 67, + 543, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 543, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 543, + 224 + ], + "type": "text", + "content": "economic roles played by different model classes. For instance, retrospective and counterfactual evaluations demonstrate that lightweight models primarily drive efficiency on basic tasks, whereas reasoning models are essential for complex problem-solving. Critically, our findings show that common inference-time techniques typically increase the cost-of-pass, thus failing to provide net economic benefits when compared to the progress made by improving the underlying models themselves. In conclusion, our framework offers a principled foundation for measuring language model innovation in economic terms. It serves as a valuable tool for guiding model selection and aligning AI development with real-world value." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 238, + 402, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 238, + 402, + 251 + ], + "spans": [ + { + "bbox": [ + 305, + 238, + 402, + 251 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 258, + 544, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 544, + 355 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 544, + 355 + ], + "type": "text", + "content": "We thank Federico Bianchi, Dan Jurafsky, Daniel E. Ho, Can Yesildere, and Semyon Lomasov for valuable comments and discussions in the early stages of this project. MHE gratefully acknowledges support from the Fulbright Foreign Student Program. BE gratefully acknowledges the support of the Stanford Knight-Hennessy Scholarship. MS gratefully acknowledges the support of an HAI-SAP Fellowship." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 369, + 364, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 364, + 382 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 364, + 382 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 388, + 544, + 718 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 306, + 388, + 544, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 388, + 544, + 437 + ], + "spans": [ + { + "bbox": [ + 306, + 388, + 544, + 437 + ], + "type": "text", + "content": "1st grade 4th quarter expectations – fast facts timed tests. Elementary School Curriculum Note (online PDF), 2021. States 20–25 addition problems should be solved in 1 minute (2–3 sec each) (Fas, 2021)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 445, + 544, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 445, + 544, + 493 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 544, + 493 + ], + "type": "text", + "content": "Daron Acemoglu. The Simple Macroeconomics of AI. NBER Working Papers 32487, National Bureau of Economic Research, Inc, May 2024. URL https://ideas.repec.org/p/nbr/nberwo/32487.html." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 501, + 544, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 501, + 544, + 584 + ], + "spans": [ + { + "bbox": [ + 306, + 501, + 544, + 584 + ], + "type": "text", + "content": "Dennis Aigner, C.A.Knox Lovell, and Peter Schmidt. Formulation and estimation of stochastic frontier production function models. Journal of Econometrics, 6(1):21-37, 1977. ISSN 0304-4076. doi: https://doi.org/10.1016/0304-4076(77)90052-5. URL https://www.sciencedirect.com/science/article/pii/0304407677900525." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 594, + 543, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 594, + 543, + 628 + ], + "spans": [ + { + "bbox": [ + 306, + 594, + 543, + 628 + ], + "type": "text", + "content": "Anthropic. Claude 3.5 sonnet announcement, 2024. URL https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 13 Feb. 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 637, + 544, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 637, + 544, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 637, + 544, + 673 + ], + "type": "text", + "content": "Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 681, + 544, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 681, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 306, + 681, + 544, + 718 + ], + "type": "text", + "content": "Art of Problem Solving. American Invitational Mathematics Examination (AIME) Format. AoPS Wiki (aops.com), 2023. States AIME is 15 questions in 3 hours (12 min" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 68, + 291, + 717 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 63, + 68, + 290, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 68, + 290, + 91 + ], + "spans": [ + { + "bbox": [ + 63, + 68, + 290, + 91 + ], + "type": "text", + "content": "per problem) (Art of Problem Solving, 2023). Accessed Mar 25, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 99, + 291, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 99, + 291, + 146 + ], + "spans": [ + { + "bbox": [ + 53, + 99, + 291, + 146 + ], + "type": "text", + "content": "Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 
Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 156, + 291, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 156, + 291, + 202 + ], + "spans": [ + { + "bbox": [ + 54, + 156, + 291, + 202 + ], + "type": "text", + "content": "Dirk Bergemann, Alessandro Bonatti, and Alex Smolin. The economics of large language models: Token allocation, fine-tuning, and optimal pricing. arXiv preprint arXiv:2502.07736, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 211, + 291, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 211, + 291, + 270 + ], + "spans": [ + { + "bbox": [ + 54, + 211, + 291, + 270 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 279, + 291, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 279, + 291, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 279, + 291, + 315 + ], + "type": "text", + "content": "Erik Brynjolfsson, Danielle Li, and Lindsey Raymond. *Generative ai at work.* The *Quarterly Journal of Economics*, pp. qjae044, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 323, + 291, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 323, + 291, + 370 + ], + "spans": [ + { + "bbox": [ + 53, + 323, + 291, + 370 + ], + "type": "text", + "content": "Lingjiao Chen, Matei Zaharia, and James Zou. Frugalgpt: How to use large language models while reducing cost and improving performance. arXiv preprint arXiv:2305.05176, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "type": "text", + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for " + }, + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "type": "inline_equation", + "content": "2+" + }, + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "type": "inline_equation", + "content": "3=" + }, + { + "bbox": [ + 53, + 379, + 291, + 437 + ], + "type": "text", + "content": "? on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 446, + 291, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 446, + 291, + 506 + ], + "spans": [ + { + "bbox": [ + 53, + 446, + 291, + 506 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 514, + 291, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 291, + 585 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 291, + 585 + ], + "type": "text", + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 594, + 291, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 594, + 291, + 629 + ], + "spans": [ + { + "bbox": [ + 53, + 594, + 291, + 629 + ], + "type": "text", + "content": "Tyna Eloundou, Sam Manning, Pamela Mishkin, and Daniel Rock. Gpts are gpts: Labor market impact potential of llms. Science, 384(6702):1306-1308, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 638, + 291, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 638, + 291, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 638, + 291, + 673 + ], + "type": "text", + "content": "Michael James Farrell. The measurement of productive efficiency. Journal of the royal statistical society: series A (General), 120(3):253-281, 1957." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 681, + 291, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 291, + 717 + ], + "type": "text", + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan," + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 68, + 542, + 717 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 68, + 541, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 68, + 541, + 91 + ], + "spans": [ + { + "bbox": [ + 315, + 68, + 541, + 91 + ], + "type": "text", + "content": "et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 99, + 542, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 99, + 542, + 158 + ], + "spans": [ + { + "bbox": [ + 306, + 99, + 542, + 158 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 167, + 542, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 167, + 542, + 213 + ], + "spans": [ + { + "bbox": [ + 306, + 167, + 542, + 213 + ], + "type": "text", + "content": "Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. Token-budget-aware llm reasoning, 2025. URL https://arxiv.org/abs/2412.18547." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 222, + 542, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 222, + 542, + 305 + ], + "spans": [ + { + "bbox": [ + 306, + 222, + 542, + 305 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 313, + 542, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 313, + 542, + 361 + ], + "spans": [ + { + "bbox": [ + 306, + 313, + 542, + 361 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihindra, Alan Hayes, Alec Radford, et al. 
Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 369, + 542, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 369, + 542, + 404 + ], + "spans": [ + { + "bbox": [ + 306, + 369, + 542, + 404 + ], + "type": "text", + "content": "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 412, + 542, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 412, + 542, + 472 + ], + "spans": [ + { + "bbox": [ + 306, + 412, + 542, + 472 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 479, + 542, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 479, + 542, + 526 + ], + "spans": [ + { + "bbox": [ + 306, + 479, + 542, + 526 + ], + "type": "text", + "content": "MAA. American Invitational Mathematics Examination (AIME). https://maa.org/maa-invitational-competitions/, 2024. Accessed: 2025-03-25." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 535, + 542, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 535, + 542, + 605 + ], + "spans": [ + { + "bbox": [ + 306, + 535, + 542, + 605 + ], + "type": "text", + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 614, + 542, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 614, + 542, + 662 + ], + "spans": [ + { + "bbox": [ + 306, + 614, + 542, + 662 + ], + "type": "text", + "content": "Paul Joe Maliakel, Shashikant Ilager, and Ivona Brandic. Investigating energy efficiency and performance trade-offs in llm inference across tasks and dvfs settings. arXiv preprint arXiv:2501.08219, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 542, + 717 + ], + "type": "text", + "content": "Tyler McDonald, Anthony Colosimo, Yifeng Li, and Ali Emami. Can we afford the perfect prompt? balancing cost and accuracy with the economical prompting index. arXiv preprint arXiv:2412.01690, 2024." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 294, + 717 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 53, + 67, + 292, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 292, + 102 + ], + 
"spans": [ + { + "bbox": [ + 53, + 67, + 292, + 102 + ], + "type": "text", + "content": "Meta-AI. Llama 3.3 70b instruct model, 2024. URL https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 111, + 292, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 111, + 292, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 111, + 292, + 159 + ], + "type": "text", + "content": "Samuel Miserendino, Michele Wang, Tejal Patwardhan, and Johannes Heidecke. Swe-lancer: Can frontier llms earn $1 million from real-world freelance software engineering? arXiv preprint arXiv:2502.12115, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 167, + 292, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 167, + 292, + 226 + ], + "spans": [ + { + "bbox": [ + 53, + 167, + 292, + 226 + ], + "type": "text", + "content": "Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicolamaria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 294, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 294, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 294, + 271 + ], + "type": "text", + "content": "OpenAI. Gpt-4o mini: Advancing cost-efficient intelligence, 2024. URL https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 279, + 292, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 279, + 292, + 303 + ], + "spans": [ + { + "bbox": [ + 53, + 279, + 292, + 303 + ], + "type": "text", + "content": "OpenAI. Openai o3-mini system card, 2025. URL https://openai.com/index/o3-mini-system-card/." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 310, + 292, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 310, + 292, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 310, + 292, + 717 + ], + "type": "text", + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quinonero Candela, Joe Palermo, Joel Parish, 
Johannes Heidecke, John Hallman, John Rizzo, Jonathan" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 543, + 717 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 313, + 67, + 543, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 67, + 543, + 547 + ], + "spans": [ + { + "bbox": [ + 313, + 67, + 543, + 547 + ], + "type": "text", + "content": "Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. 
Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan Thibault Sottiaux Thomas Degry Thomas Dimson Tianhao Zheng Timur Garipov Tom Stasi Trapit Bansal. Trevor Creech Troy Peterson Tyna Eloundou Valerie Qi,Vineet Kosaraju,Vinnie Monaco,Vitchyr Pong,Vlad Fomenko Weiyi ZhengWenda ZhouWes McCabe Wojciech ZarembaYann Dubois Yinghai LuYining Chen Young ChaYu BaiYuchen He,Yuchen Zhang,Yunyun Wang,Zheng Shao,and Zhuohan Li. Openai o1 system card2024. URL https://arxiv.org/abs/2412.16720." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 553, + 543, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 553, + 543, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 553, + 543, + 613 + ], + "type": "text", + "content": "Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 621, + 543, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 543, + 717 + ], + "type": "text", + "content": "Alicia Parrish, Angelica Chen, Nikita Nangia, Vishakh Padmakumar, Jason Phang, Jana Thompson, Phu Mon Htut, and Samuel Bowman. BBQ: A hand-built bias benchmark for question answering. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 2086-2105, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 68, + 291, + 716 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 63, + 68, + 291, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 68, + 291, + 91 + ], + "spans": [ + { + "bbox": [ + 63, + 68, + 291, + 91 + ], + "type": "text", + "content": "findings-acl.165. URL https://aclanthology.org/2022.findings-acl.165/." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 100, + 291, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 100, + 291, + 148 + ], + "spans": [ + { + "bbox": [ + 53, + 100, + 291, + 148 + ], + "type": "text", + "content": "David Rein. Can good benchmarks contain mistakes? NYU Alignment Research Group Blog, May 2024. Reveals GPQA expert pay (\\(100/hr) and non-expert solve times (Rein, 2024). Online: wp.nyu.edu/...mistakes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 156, + 291, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 156, + 291, + 217 + ], + "spans": [ + { + "bbox": [ + 53, + 156, + 291, + 217 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof qa benchmark. In First Conference on Language Modeling, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 225, + 291, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 225, + 291, + 273 + ], + "spans": [ + { + "bbox": [ + 53, + 225, + 291, + 273 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling lIm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 281, + 291, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 281, + 291, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 281, + 291, + 342 + ], + "type": "text", + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 350, + 291, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 291, + 399 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 291, + 399 + ], + "type": "text", + "content": "TutorCruncher. Average tutoring rates use: How much do tutors charge per hour? TutorCruncher Blog, Feb 2025. Reports " + }, + { + "bbox": [ + 53, + 350, + 291, + 399 + ], + "type": "inline_equation", + "content": "45-" + }, + { + "bbox": [ + 53, + 350, + 291, + 399 + ], + "type": "text", + "content": "100/hr as typical range for test-prep tutoring (TutorCruncher, 2025)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 407, + 291, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 407, + 291, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 407, + 291, + 456 + ], + "type": "text", + "content": "Upwork. Data entry specialist hourly rates (cost to hire data entry specialist). Upwork Hiring Guide, 2025. Median " + }, + { + "bbox": [ + 53, + 407, + 291, + 456 + ], + "type": "inline_equation", + "content": "13/hr for data entry freelancers;" + }, + { + "bbox": [ + 53, + 407, + 291, + 456 + ], + "type": "text", + "content": "10–$20/hr typical range (Upwork, 2025). Accessed Mar 25, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 464, + 291, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 291, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 291, + 512 + ], + "type": "text", + "content": "Junlin Wang, Siddhartha Jain, Dejiao Zhang, Baishakhi Ray, Varun Kumar, and Ben Athiwaratkun. Reasoning in token economies: Budget-aware evaluation of llm reasoning strategies. arXiv preprint arXiv:2406.06461, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 520, + 291, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 520, + 291, + 580 + ], + "spans": [ + { + "bbox": [ + 53, + 520, + 291, + 580 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 589, + 291, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 589, + 291, + 661 + ], + "spans": [ + { + "bbox": [ + 53, + 589, + 291, + 661 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 669, + 291, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 669, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 53, + 669, + 291, + 716 + ], + "type": "text", + "content": "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms. arXiv preprint arXiv:2502.07266, 2025." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 330 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 306, + 67, + 542, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 542, + 126 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 542, + 126 + ], + "type": "text", + "content": "Wyzant Tutoring. 
New jersey math tutors cost " + }, + { + "bbox": [ + 306, + 67, + 542, + 126 + ], + "type": "inline_equation", + "content": "33 -" + }, + { + "bbox": [ + 306, + 67, + 542, + 126 + ], + "type": "text", + "content": "55 per hour on average. Wyzant.com (tutoring rate listing), 2025. Average private tutoring rates for math (K-12 and competition) (Wyzant Tutoring, 2025). Accessed Mar 25, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 135, + 542, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 135, + 542, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 135, + 542, + 171 + ], + "type": "text", + "content": "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 178, + 542, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 178, + 542, + 215 + ], + "spans": [ + { + "bbox": [ + 306, + 178, + 542, + 215 + ], + "type": "text", + "content": "Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for ltm reasoning. arXiv preprint arXiv:2502.18080, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 223, + 542, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 223, + 542, + 330 + ], + "spans": [ + { + "bbox": [ + 306, + 223, + 542, + 330 + ], + "type": "text", + "content": "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. In NeurIPS 2024 Datasets and Benchmarks Track, 2024. Reports human solve rate on GSM8K: 4 problems/15 min (3.7 min each) (Zhang et al., 2024)." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 57 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 284, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 284, + 80 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 284, + 80 + ], + "type": "text", + "content": "A. Details of Human Expert Cost Estimation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 291, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 291, + 277 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 291, + 277 + ], + "type": "text", + "content": "In this section, we introduce the detailed analysis of how the human expert costs in Table 3 are calculated per dataset. AIME (American Invitational Mathematics Examination) consists of 15 challenging math problems in a 3-hour contest (administered in two separate sections: AIME I & II), giving an average of about 12 minutes per problem (Art of Problem Solving, 2023). 
In practice, expert math tutors for competitions like AIME command high hourly fees in the range of " + }, + { + "bbox": [ + 52, + 87, + 291, + 277 + ], + "type": "inline_equation", + "content": "45 -" + }, + { + "bbox": [ + 52, + 87, + 291, + 277 + ], + "type": "text", + "content": "100, reflecting intensive test-preparation rates (TutorCruncher, 2025). This rate range aligns with specialized test prep tutoring in the US, which is higher than regular tutoring due to the advanced problem-solving skills required (TutorCruncher, 2025). At roughly 12 minutes per AIME question on average, a solver could handle about five such problems per hour under exam conditions (Art of Problem Solving, 2023)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 284, + 292, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 284, + 292, + 428 + ], + "spans": [ + { + "bbox": [ + 52, + 284, + 292, + 428 + ], + "type": "text", + "content": "BBQ (Bias Benchmark for QA) contains short question-answer scenarios targeting social bias. Crowdworkers annotating BBQ have been paid around $15 per hour, a rate chosen to exceed U.S. minimum wage (Parrish et al., 2022). Because each task includes multiple BBQ questions, workers were able to answer roughly 5 questions in 2 minutes (Parrish et al., 2022) - i.e. ~24 seconds per question, or about 0.4 minutes per question. This fast per-question time reflects the fact that BBQ items are short multiple-choice queries, allowing a human annotator to complete approximately 150 BBQ questions in an hour at that pay rate (Parrish et al., 2022)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 434, + 291, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 434, + 291, + 577 + ], + "spans": [ + { + "bbox": [ + 52, + 434, + 291, + 577 + ], + "type": "text", + "content": "GPQA Diamond consists of extremely difficult graduate-level science questions, so human experts demand high compensation. In one case, domain experts were paid about \\(100 per hour to contribute and validate GPQA questions (Rein et al., 2024). These questions are \"Google-proof\" and time-consuming: skilled non-expert participants spent over 30-35 minutes on average per question when attempting to solve GPQA problems with unrestricted web access (Rein et al., 2024). This long duration per question underscores GPQA's complexity – at most 2 questions could be solved in an hour even by motivated annotators, which justifies the premium expert hourly rate (Rein, 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 582, + 291, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 582, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 582, + 291, + 715 + ], + "type": "text", + "content": "GSM8K contains grade-school level math word problems. Solving these is relatively time-efficient for adults: in one study, crowdworkers under time pressure managed to solve about 4.07 GSM8K problems in 15 minutes on average (Zhang et al., 2024). That corresponds to roughly 3.7 minutes per question for a human solver. The required skill is comparable to general math tutoring at the K-8 level, for which typical U.S. tutor rates are about " + }, + { + "bbox": [ + 52, + 582, + 291, + 715 + ], + "type": "inline_equation", + "content": "33 -" + }, + { + "bbox": [ + 52, + 582, + 291, + 715 + ], + "type": "text", + "content": "55 per hour on platforms like Wyzant (Wyzant Tutoring, 2025). 
At such a rate, paying a person to solve GSM8K problems would be economical, given that a proficient solver can complete" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 67, + 543, + 80 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 543, + 80 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 543, + 80 + ], + "type": "text", + "content": "approximately 16 questions in one hour (Zhang et al., 2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "spans": [ + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "text", + "content": "MATH500 is a set of 500 advanced competition math problems (drawn from the harder tier of a larger MATH dataset). These problems are similar in difficulty to top-level contest questions such as late AIME or Olympiad qualifying problems. As with AIME, a well-prepared human might spend on the order of 10-15 minutes per problem, roughly " + }, + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "text", + "content": "12 minutes on average for a hard competition question (Art of Problem Solving, 2023). Tutors capable of solving and teaching such Olympiad-level problems often charge rates on the order of " + }, + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "inline_equation", + "content": "50 per hour (with a typical range of" + }, + { + "bbox": [ + 303, + 85, + 544, + 253 + ], + "type": "text", + "content": "35- $60 for competition math tutoring) (Wyzant Tutoring, 2025). This implies that solving roughly five MATH500 problems could cost about $50 and take around an hour, consistent with the per-question time and high skill required." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 258, + 544, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 258, + 544, + 414 + ], + "spans": [ + { + "bbox": [ + 303, + 258, + 544, + 414 + ], + "type": "text", + "content": "Two-Digit Addition consists of simple two-digit addition problems, which are very quick for humans to solve. Early elementary students are often expected to complete about 20-25 basic addition problems in one minute in \"mad minute\" drills (Fas, 2021). This corresponds to roughly 2-3 seconds per addition (0.04 minutes per question). Because the task is so elementary, the labor to solve large numbers of such problems can be valued at a lower hourly rate. Simple data-entry style work or basic math tasks on freelance platforms pay on the order of " + }, + { + "bbox": [ + 303, + 258, + 544, + 414 + ], + "type": "inline_equation", + "content": "10 -" + }, + { + "bbox": [ + 303, + 258, + 544, + 414 + ], + "type": "text", + "content": "20 per hour (Upwork, 2025). At $15/hour, for example, a worker could theoretically solve several hundred 2-digit additions within the hour, given the ~3-second average solution time (Fas, 2021)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 430, + 429, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 429, + 441 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 429, + 441 + ], + "type": "text", + "content": "B. 
Details of Evaluation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 450, + 543, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 450, + 543, + 569 + ], + "spans": [ + { + "bbox": [ + 303, + 450, + 543, + 569 + ], + "type": "text", + "content": "For each dataset in our evaluation, we sample up to 128 instances and run each model " + }, + { + "bbox": [ + 303, + 450, + 543, + 569 + ], + "type": "inline_equation", + "content": "n = 8" + }, + { + "bbox": [ + 303, + 450, + 543, + 569 + ], + "type": "text", + "content": " times to estimate the expected runtime cost and accuracy per sample. For all models except OpenAI's reasoning models, we set the temperature to 0.7 and top_p to 1.0. In the case of OpenAI's reasoning models, we use a temperature of 1.0 and do not apply top_p. Additionally, we use the default maximum token generation limits provided by each model. Per sample, we employ a concise but descriptive instruction prompt for the models to follow." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "spans": [ + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "text", + "content": "In our experiments, we define the pass " + }, + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "inline_equation", + "content": "r_m(p)" + }, + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "text", + "content": " as whether the model obtains a correct answer after a single run or not (0 or 1), and the cost " + }, + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "inline_equation", + "content": "c_m(p)" + }, + { + "bbox": [ + 303, + 576, + 542, + 612 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 616, + 542, + 630 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 616, + 542, + 630 + ], + "spans": [ + { + "bbox": [ + 313, + 616, + 542, + 630 + ], + "type": "interline_equation", + "content": "c _ {m} (p) = n _ {\\text {i n}} (m, p) \\cdot c _ {\\text {i n}} (m) + n _ {\\text {o u t}} (m, p) \\cdot c _ {\\text {o u t}} (m) \\tag {14}", + "image_path": "9a8f99b249a9f5e30425b3deabcfed43f74023c4cde283f0657c48f6b00e62e5.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "inline_equation", + "content": "n_{*}(m,p)" + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "content": " denotes the number of input / output tokens consumed / generated by the model " + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 
303, + 634, + 542, + 717 + ], + "type": "text", + "content": " on problem " + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "inline_equation", + "content": "c_{*}(m)" + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "content": " denotes the dollar costs per input / output tokens consumed / generated by the model " + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 303, + 634, + 542, + 717 + ], + "type": "text", + "content": " (see Table 4 for the pricing). For the expert costs, we utilize the estimations from Table 3, and set the rates to the upper-bound value to ensure the approximation of the expert accuracy being 1." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 64, + 535, + 186 + ], + "blocks": [ + { + "bbox": [ + 61, + 64, + 535, + 186 + ], + "lines": [ + { + "bbox": [ + 61, + 64, + 535, + 186 + ], + "spans": [ + { + "bbox": [ + 61, + 64, + 535, + 186 + ], + "type": "table", + "html": "
DatasetQualification RequirementsHourly RateTime per QuestionEst. Cost
AIMEAdvanced high-school contest math skills$45–$100~12 minutes$9–$20
BBQGeneral familiarity with social biases$15~0.4 minutes (24 sec)$0.10
GPQA Dia.Graduate-level domain expertise$100~35 minutes$58
GSM8KBasic arithmetic reasoning$33–$55~3.7 minutes$2–$3.50
MATH500Strong competition-level problem-solving$35–$60~12 minutes$7–$12
Two-Digit Add.Basic numeracy$10–$20~0.04 minutes (3 sec)$0.01–$0.02
", + "image_path": "3a562bbe1004e7cf970e8b8277eea3ed839c8f64724ec353dd49ba5d688790e0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 194, + 543, + 216 + ], + "lines": [ + { + "bbox": [ + 52, + 194, + 543, + 216 + ], + "spans": [ + { + "bbox": [ + 52, + 194, + 543, + 216 + ], + "type": "text", + "content": "Table 3: Estimated costs of hiring a human expert to solve one question from each dataset, based on typical qualifications, hourly rates, and time per question." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 57, + 222, + 148, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 222, + 148, + 234 + ], + "spans": [ + { + "bbox": [ + 57, + 222, + 148, + 234 + ], + "type": "text", + "content": "Experiment Prompt" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 57, + 237, + 286, + 485 + ], + "blocks": [ + { + "bbox": [ + 57, + 237, + 286, + 485 + ], + "lines": [ + { + "bbox": [ + 57, + 237, + 286, + 485 + ], + "spans": [ + { + "bbox": [ + 57, + 237, + 286, + 485 + ], + "type": "text", + "content": "Please solve the following question. You can explain your solution before presenting the final answer. Format your final answer as: ... Instructions: - For multiple-choice: Give only the letter (e.g., (A)). - For numeric: Give only the number (e.g., 42). - For free-response: Provide the full final answer text. INPUT: , , {input} ," + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 53, + 506, + 165, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 506, + 165, + 519 + ], + "spans": [ + { + "bbox": [ + 53, + 506, + 165, + 519 + ], + "type": "text", + "content": "C. 
Additional Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 527, + 246, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 527, + 246, + 540 + ], + "spans": [ + { + "bbox": [ + 52, + 527, + 246, + 540 + ], + "type": "text", + "content": "C.1. Expected Accuracy and Inference Costs" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 546, + 291, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 546, + 291, + 606 + ], + "spans": [ + { + "bbox": [ + 52, + 546, + 291, + 606 + ], + "type": "text", + "content": "As discussed in the Section 3.2, we share the results of expected cost and accuracy per model per dataset. We can observe the skewed preference of a particular model family under each metric, implying the inability of expressing economic impact of models through these metrics solely." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 619, + 216, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 619, + 216, + 631 + ], + "spans": [ + { + "bbox": [ + 52, + 619, + 216, + 631 + ], + "type": "text", + "content": "C.2. Relative Gain per Model Release" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 637, + 291, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 637, + 291, + 674 + ], + "spans": [ + { + "bbox": [ + 52, + 637, + 291, + 674 + ], + "type": "text", + "content": "Figure 4 presents the relative improvement in temporal frontier cost-of-pass for each model release, illustrated using bar plots. 
Namely, we calculate:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 126, + 689, + 291, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 689, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 126, + 689, + 291, + 716 + ], + "type": "interline_equation", + "content": "\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {t} \\right\\} , \\mathcal {M} _ {t - 1}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {t - 1}\\right)} \\tag {15}", + "image_path": "929a09e0fe5367a1f0d28cf97f7fb951feaaade98d98308c1806d8f0b0911abf.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 222, + 542, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 222, + 542, + 330 + ], + "spans": [ + { + "bbox": [ + 304, + 222, + 542, + 330 + ], + "type": "text", + "content": "The results indicate that the reasoning models demonstrate notable advancements, particularly on complex quantitative tasks. In contrast, lightweight models exhibit marked gains on basic tasks. These findings support the observations from our experiments (Sections 3.2, 3.4). Notably, The substantial improvement observed for GPT-4o is likely due to it being the first model included in our analysis, resulting in a pronounced leap relative to the baseline cost associated with human expert annotation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 342, + 513, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 513, + 367 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 513, + 367 + ], + "type": "text", + "content": "C.3. 
Counterfactual Frontier Cost-of-Pass in the Absence of a Single Model" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 373, + 544, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 373, + 544, + 421 + ], + "spans": [ + { + "bbox": [ + 304, + 373, + 544, + 421 + ], + "type": "text", + "content": "In this section, following the methodology outlined in Section 3.4, we quantify the relative improvement in frontier cost-of-pass using a counterfactual approach. Specifically, for each model " + }, + { + "bbox": [ + 304, + 373, + 544, + 421 + ], + "type": "inline_equation", + "content": "m_{*}" + }, + { + "bbox": [ + 304, + 373, + 544, + 421 + ], + "type": "text", + "content": ", we calculate the following:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 363, + 426, + 542, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 426, + 542, + 454 + ], + "spans": [ + { + "bbox": [ + 363, + 426, + 542, + 454 + ], + "type": "interline_equation", + "content": "\\frac {G _ {p \\sim D} \\left(\\left\\{m _ {*} \\right\\} , \\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}{V _ {p \\sim D} \\left(\\mathcal {M} _ {T} \\backslash \\left\\{m _ {*} \\right\\}\\right)}, \\tag {16}", + "image_path": "e80adc147208c7a870476ff72078de8f24d2604feef4f8aab2fc804c0c08726b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 458, + 543, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 543, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 543, + 542 + ], + "type": "text", + "content": "quantifying the essentialness of the model " + }, + { + "bbox": [ + 304, + 458, + 543, + 542 + ], + "type": "inline_equation", + "content": "m_*" + }, + { + "bbox": [ + 304, + 458, + 543, + 542 + ], + "type": "text", + "content": ". 
The results presented in Figure 5 demonstrate that the contributions of most individual models are largely compensable by the remaining models. Furthermore, we observe a similar coarse-level trend, as noted in Section 3.4, indicating that different model families provide greater benefits in specific task categories." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 557, + 542, + 584 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 542, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 542, + 584 + ], + "type": "text", + "content": "D. Limitations of Our Framework and Future Work Directions" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 592, + 543, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 592, + 543, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 592, + 543, + 627 + ], + "type": "text", + "content": "In this section, we acknowledge the limitations of the presented framework and propose directions for future improvements and extensions." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "text", + "content": "A primary limitation pertains to our definitions and computations of cost " + }, + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "inline_equation", + "content": "(C_p(m))" + }, + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "text", + "content": " and performance " + }, + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "inline_equation", + "content": "(R_{p}(m))" + }, + { + "bbox": [ + 303, + 633, + 544, + 718 + ], + "type": "text", + "content": ". Specifically, our current cost computation considers only input and output token costs as proxies for the total expense incurred in obtaining correct outputs. 
This approach neglects indirect or overhead costs associated with generating incorrect outputs, such as subsequent verification costs. Regarding per" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 118, + 64, + 478, + 213 + ], + "blocks": [ + { + "bbox": [ + 118, + 64, + 478, + 213 + ], + "lines": [ + { + "bbox": [ + 118, + 64, + 478, + 213 + ], + "spans": [ + { + "bbox": [ + 118, + 64, + 478, + 213 + ], + "type": "table", + "html": "
CategoryModelRelease DateCost (per million tokens)
Input TokensOutput Tokens
Lightweight ModelsLlama-3.1-8B7/23/2024$0.18$0.18
GPT-4o Mini7/18/2024$0.15$0.60
Llama-3.3-70B12/6/2024$0.88$0.88
Large ModelsLlama-3.1-405B7/23/2024$3.50$3.50
GPT-4o5/13/2024$2.50$10.00
Claude Sonnet-3.56/20/2024$3.00$15.00
Reasoning ModelsOpenAI o1-mini9/12/2024$1.10$4.40
OpenAI o3-mini1/31/2025$1.10$4.40
DeepSeek-R11/20/2025$7.00$7.00
OpenAI o112/5/2024$15.00$60.00
", + "image_path": "af65fcac32682b6e5ca9b5ec10bb48eec299698dbff86670f68f02431e483ce3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 91, + 242, + 504, + 450 + ], + "blocks": [ + { + "bbox": [ + 200, + 220, + 394, + 231 + ], + "lines": [ + { + "bbox": [ + 200, + 220, + 394, + 231 + ], + "spans": [ + { + "bbox": [ + 200, + 220, + 394, + 231 + ], + "type": "text", + "content": "Table 4: Per-token inference costs with release dates." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 91, + 242, + 504, + 450 + ], + "lines": [ + { + "bbox": [ + 91, + 242, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 91, + 242, + 504, + 450 + ], + "type": "table", + "html": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B89.4575.7821.4817.8737.3012.50
GPT-4o mini99.9088.5753.3218.0770.0214.58
Llama-3.3-70B99.9092.0985.0646.4872.7533.33
Large Models
Llama-3.1-405B99.7193.9585.7444.1467.8731.67
Claude Sonnet-3.5100.0094.4392.5855.3764.7515.83
GPT-4o99.7191.9990.0447.0773.1414.58
Reasoning Models
OpenAI o1-mini99.5192.5885.7449.1285.9453.33
OpenAI o1100.0094.0495.0273.8389.4572.50
DeepSeek-R1100.0093.3683.6954.8893.8560.83
OpenAI o3-mini100.0092.7783.7971.6888.5777.08
", + "image_path": "630111621fe5a87da45a6dc5cf0ef2e8173bf3a731e83a851a6919989be45eec.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 87, + 480, + 507, + 687 + ], + "blocks": [ + { + "bbox": [ + 52, + 457, + 541, + 470 + ], + "lines": [ + { + "bbox": [ + 52, + 457, + 541, + 470 + ], + "spans": [ + { + "bbox": [ + 52, + 457, + 541, + 470 + ], + "type": "text", + "content": "Table 5: Accuracy (%) per model per dataset: " + }, + { + "bbox": [ + 52, + 457, + 541, + 470 + ], + "type": "inline_equation", + "content": "{R}_{m}\\left( {p \\sim D}\\right)" + }, + { + "bbox": [ + 52, + 457, + 541, + 470 + ], + "type": "text", + "content": " . In each column,the 3 entries with the highest accuracy have blue highlights." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 87, + 480, + 507, + 687 + ], + "lines": [ + { + "bbox": [ + 87, + 480, + 507, + 687 + ], + "spans": [ + { + "bbox": [ + 87, + 480, + 507, + 687 + ], + "type": "table", + "html": "
Model CategoryBasic QuantitativeKnowledge BasedComplex Quantitative
2-Digit Add.GSM8KBBQGPQA Dia.MATH 500AIME24
Lightweight Models
Llama-3.1-8B4.2e-57.4e-55.2e-51.8e-41.5e-42.2e-4
GPT-4o mini5.4e-51.9e-41.0e-43.9e-43.7e-45.6e-4
Llama-3.3-70B1.6e-43.3e-43.1e-49.6e-46.7e-41.1e-3
Large Models
Llama-3.1-405B6.9e-41.4e-31.0e-33.0e-32.4e-33.7e-3
Claude Sonnet-3.52.1e-33.7e-33.0e-36.9e-35.9e-37.5e-3
GPT-4o2.3e-34.5e-32.7e-30.018.7e-30.01
Reasoning Models
OpenAI o1-mini5.4e-38.4e-37.6e-30.020.020.07
OpenAI o10.020.030.040.250.130.52
DeepSeek-R11.8e-35.1e-34.6e-30.040.010.04
OpenAI o3-mini1.1e-32.1e-32.6e-30.015.4e-30.02
", + "image_path": "16bb05ff693bf5ab03e08b00cbae1fffb84268ea796fdc389e85b0fd26e7e712.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 695, + 541, + 708 + ], + "lines": [ + { + "bbox": [ + 52, + 695, + 541, + 708 + ], + "spans": [ + { + "bbox": [ + 52, + 695, + 541, + 708 + ], + "type": "text", + "content": "Table 6: Dollar cost incurred per model per dataset: " + }, + { + "bbox": [ + 52, + 695, + 541, + 708 + ], + "type": "inline_equation", + "content": "{C}_{m}\\left( {p \\sim D}\\right)" + }, + { + "bbox": [ + 52, + 695, + 541, + 708 + ], + "type": "text", + "content": " . In each column,the 3 entries with the lowest cost have blue highlights." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 439, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 439, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 439, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 70, + 208, + 219 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 208, + 219 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 208, + 219 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 208, + 219 + ], + "type": "image", + "image_path": "69cafd20b82715c5ce5bb220598d484c9b0fbbe4119cd95059eba426708f91b8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": 
[ + 225, + 70, + 373, + 220 + ], + "blocks": [ + { + "bbox": [ + 225, + 70, + 373, + 220 + ], + "lines": [ + { + "bbox": [ + 225, + 70, + 373, + 220 + ], + "spans": [ + { + "bbox": [ + 225, + 70, + 373, + 220 + ], + "type": "image", + "image_path": "d3e4f5986542f85a49bded9c7166a5adf5e87fd57d136419bdebcd51f870ec80.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 389, + 70, + 537, + 220 + ], + "blocks": [ + { + "bbox": [ + 389, + 70, + 537, + 220 + ], + "lines": [ + { + "bbox": [ + 389, + 70, + 537, + 220 + ], + "spans": [ + { + "bbox": [ + 389, + 70, + 537, + 220 + ], + "type": "image", + "image_path": "76e0e15128f0ec1aecaad72d16f7351d5da78fa90a7f7176ffeee04462d6efca.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 59, + 227, + 209, + 378 + ], + "blocks": [ + { + "bbox": [ + 59, + 227, + 209, + 378 + ], + "lines": [ + { + "bbox": [ + 59, + 227, + 209, + 378 + ], + "spans": [ + { + "bbox": [ + 59, + 227, + 209, + 378 + ], + "type": "image", + "image_path": "d905ee90552f923adb69e449a7d5dae8bc9ddb1cc5bc118940307875016f3c44.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 79, + 387, + 515, + 405 + ], + "lines": [ + { + "bbox": [ + 79, + 387, + 515, + 405 + ], + "spans": [ + { + "bbox": [ + 79, + 387, + 515, + 405 + ], + "type": "text", + "content": "Figure 4: Bar plot showing the percentage of change in frontier cost-of-pass per model release (i.e. 
" + }, + { + "bbox": [ + 79, + 387, + 515, + 405 + ], + "type": "inline_equation", + "content": "\\frac{G_{p\\sim D}(\\{m_t\\},\\mathcal{M}_{t-1})}{V_{p\\sim D}(\\mathcal{M}_{t-1})}" + }, + { + "bbox": [ + 79, + 387, + 515, + 405 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 225, + 227, + 373, + 378 + ], + "blocks": [ + { + "bbox": [ + 225, + 227, + 373, + 378 + ], + "lines": [ + { + "bbox": [ + 225, + 227, + 373, + 378 + ], + "spans": [ + { + "bbox": [ + 225, + 227, + 373, + 378 + ], + "type": "image", + "image_path": "0528f4b08946c5d99d0f8a41955a8979a5315b0841581510756bf51c578c3295.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 389, + 227, + 536, + 378 + ], + "blocks": [ + { + "bbox": [ + 389, + 227, + 536, + 378 + ], + "lines": [ + { + "bbox": [ + 389, + 227, + 536, + 378 + ], + "spans": [ + { + "bbox": [ + 389, + 227, + 536, + 378 + ], + "type": "image", + "image_path": "d5c65aa52d9a808be3ad8cf01ef796aa6d446f1572a321a2ae404561039933af.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 411, + 291, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 411, + 291, + 554 + ], + "spans": [ + { + "bbox": [ + 52, + 411, + 291, + 554 + ], + "type": "text", + "content": "formance, the use of accuracy as a binary success-or-failure metric presupposes the existence of a reliable verification pipeline and a practical decision mechanism, potentially oversimplifying scenarios where these assumptions do not hold. 
Additionally, our cost-of-pass metric, which combines cost and performance, currently does not account for variance information, limiting its practical interpretability in situations where two scenarios with similar cost-of-pass values exhibit substantially different variances. Furthermore, from a practical standpoint, cost modeling could consider alternative units (e.g., latency, inference time, FLOPs), which are currently not analyzed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 559, + 292, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 559, + 292, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 559, + 292, + 715 + ], + "type": "text", + "content": "Nevertheless, a significant strength of our framework is its abstract and modular design, facilitating extensions to address these limitations. Future work can enhance the precision of cost computations by integrating additional cost factors, such as verification overheads or indirect costs. Moreover, the framework could be adapted to alternative resource-consumption metrics like latency, inference time, or FLOPs. Regarding performance evaluation, the binary accuracy metric could be replaced or supplemented with alternative success measures tailored to specific scenarios, especially those emphasizing a particular balance between performance and cost. Incorporating variance and other statistical information into cost and performance calculations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 411, + 538, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 538, + 423 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 538, + 423 + ], + "type": "text", + "content": "could also enhance practical usability and interpretability." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 428, + 544, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 428, + 544, + 559 + ], + "spans": [ + { + "bbox": [ + 303, + 428, + 544, + 559 + ], + "type": "text", + "content": "An additional limitation lies in the evaluation methodology, particularly regarding human expert cost estimation. Our framework assumes that experts can reliably solve tasks given sufficient conditions (e.g., adequate qualifications, time, compensation). However, this assumption may not hold for particularly challenging problems or datasets with inherently high uncertainty in achieving correct solutions. Future research could address this limitation by conducting rigorous human subject studies to empirically evaluate and incorporate expert performance variability into the cost estimation process." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 234, + 536, + 529 + ], + "blocks": [ + { + "bbox": [ + 59, + 234, + 536, + 529 + ], + "lines": [ + { + "bbox": [ + 59, + 234, + 536, + 529 + ], + "spans": [ + { + "bbox": [ + 59, + 234, + 536, + 529 + ], + "type": "image", + "image_path": "376285dc3f0a3c03e98979c258df0c77b14362360a25c3cd690f23f67492e99c.jpg" + } + ] + 
} + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "lines": [ + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "spans": [ + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "text", + "content": "Figure 5: The relative improvement " + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "text", + "content": " in frontier cost-of-pass under a counterfactual setting, removing a model " + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "inline_equation", + "content": "m_*" + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "text", + "content": " from the model set " + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_T" + }, + { + "bbox": [ + 52, + 540, + 543, + 562 + ], + "type": "text", + "content": ". High values mean that the model is essential for maintaining the current frontier." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "spans": [ + { + "bbox": [ + 156, + 45, + 440, + 56 + ], + "type": "text", + "content": "Cost-of-Pass: An Economic Framework for Evaluating Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 304, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 304, + 741 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 304, + 741 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_content_list.json b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..db7c8e8c0be7524e66417a27c0b8e475b4958222 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_content_list.json @@ -0,0 +1,4473 @@ +[ + { + "type": "text", + "text": "Representation Learning for Tabular Data: A Comprehensive Survey", + "text_level": 1, + "bbox": [ + 135, + 65, + 867, + 136 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jun-Peng Jiang, Si-Yang Liu, Hao-Run Cai, Qile Zhou, Han-Jia Ye", + "bbox": [ + 233, + 151, + 759, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Tabular data, structured as rows and columns, is among the most prevalent data types in machine learning classification and regression applications. 
Models for learning from tabular data have continuously evolved, with Deep Neural Networks (DNNs) recently demonstrating promising results through their capability of representation learning. In this survey, we systematically introduce the field of tabular representation learning, covering the background, challenges, and benchmarks, along with the pros and cons of using DNNs. We organize existing methods into three main categories according to their generalization capabilities: specialized, transferable, and general models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. We introduce a hierarchical taxonomy for specialized models based on the key aspects of tabular data—features, samples, and objectives—and delve into detailed strategies for obtaining high-quality feature- and sample-level representations. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks, leveraging knowledge acquired from homogeneous or heterogeneous sources, or even cross-modalities such as vision and language. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning. We group these general models based on the strategies used to adapt across heterogeneous datasets. Additionally, we explore ensemble methods, which integrate the strengths of multiple tabular models. Finally, we discuss representative extensions of tabular learning, including open-environment tabular machine learning, multimodal learning with tabular data, and tabular understanding tasks. 
More information can be found in the following repository: https://github.com/LAMDA-Tabular/Tabular-Survey.", + "bbox": [ + 104, + 191, + 892, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Tabular Data, Representation Learning, Deep Tabular Learning, Tabular Machine Learning, Tabular Foundation Model", + "bbox": [ + 104, + 400, + 879, + 414 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 490, + 421, + 504, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 73, + 474, + 228, + 488 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tabular data, characterized by structured rows and columns, is one of the most prevalent data formats in real-world machine learning applications, spanning diverse domains such as finance [1], healthcare [2], education [3], recommendation systems [4], and scientific research. In particular, AI for scientific research (AI4science) has increasingly relied on tabular data, as numerous prominent datasets—such as those from genomics [5], chemistry [6], and climate science [7], [8]—naturally adopt tabular forms.", + "bbox": [ + 71, + 494, + 493, + 626 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tabular data inherently organizes information in a structured, table-like format. In this survey, we focus primarily on supervised tabular machine learning tasks, specifically classification and regression. Beyond their structured organization, tabular datasets frequently include heterogeneous attributes [9], encompassing numerical, categorical, or mixed data types that may be either dense or sparse. 
Additionally, many tabular datasets present quality challenges, such as noisy measurements, missing values, outliers, inaccuracies [10], and privacy constraints [11], all of which complicate the modeling process. The most common supervised tabular tasks are classification and regression, where the goal is to learn mappings from training data to discrete or continuous targets, respectively. As illustrated in Figure 1, each row represents an instance (with its corresponding label), while each column corresponds to a specific attribute or feature [12].", + "bbox": [ + 71, + 627, + 493, + 861 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg", + "image_caption": [ + "Figure 1: A brief introduction to tabular data and associated learning tasks. Each row represents an instance and each column corresponds to a specific attribute or feature, which can be numerical or categorical. The most common tabular machine learning tasks are classification and regression as shown in the right side of the figure." + ], + "image_footnote": [], + "bbox": [ + 509, + 474, + 916, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ideally, learned mappings should generalize effectively, accurately predicting outcomes for new instances drawn from the same underlying distribution.", + "bbox": [ + 503, + 751, + 924, + 796 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Machine learning methods for tabular data have evolved significantly over the years [13], [14], [15], [16]. Recently, the rise of deep learning has profoundly impacted domains like computer vision [17] and natural language processing [18], where Deep Neural Networks (DNNs) extract semantic representations directly from raw inputs [19], [20], [21]. These learned representations have not only improved generalization but have also facilitated knowledge transfer across related tasks [22]. 
The flexibility of DNNs in modeling complex feature interactions and learning rich hierarchical", + "bbox": [ + 501, + 796, + 923, + 944 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.16109v1 [cs.LG] 17 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "- J.-P. Jiang, S.-Y Liu, H.-R Cai, Q. Zhou, and H.-J. Ye are with School of Artificial Intelligence, Nanjing University, and National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China. E-mail: {jiangjp.liusy,zhouql.yehj}@lamda.nju.edu.cn, caihr@smail.nju.edu.cn", + "bbox": [ + 71, + 876, + 491, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg", + "image_caption": [ + "Figure 2: We organize existing tabular classification/regression methods into three categories according to their generalization capabilities: specialized (left), transferable (middle), and general (right) models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning." 
+ ], + "image_footnote": [], + "bbox": [ + 76, + 58, + 903, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "structures has inspired significant interest in adapting deep learning techniques to tabular data.", + "bbox": [ + 71, + 452, + 491, + 481 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Indeed, DNNs were applied to tabular data decades ago, initially targeting dimensionality reduction and visualization tasks [23], [24], [25], [26], yet they typically struggled to match tree-based methods on standard classification and regression problems. Later advances in DNNs have led to significant improvements across various tabular-related applications, such as click-through rate prediction [27], [28], anomaly detection [29], recommendation systems [30], and time series forecasting [31], [32]. Modern deep learning approaches, benefiting from better-designed architectures, optimized training strategies, high-quality representations, have revitalized DNN performance on tabular data, often rivaling or surpassing traditional tree-based models [33], [34], [35]. Given the wide variety of approaches emerging in deep tabular modeling, a systematic overview that revisits critical factors and current methodologies in representation learning for tabular data has become increasingly necessary.", + "bbox": [ + 71, + 484, + 491, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This survey begins by introducing the background of tabular data learning, highlighting the challenges involved and critically examining the advantages and limitations of utilizing DNNs compared to classical—particularly tree-based—methods [36], [37], [38], [39]. 
Given the observed instability of method performance across different tabular datasets, we also discuss comprehensive strategies for dataset collection, evaluation, and analysis, aiming to establish robust criteria for aggregating performance metrics across multiple datasets [40], [41], [42], [43].", + "bbox": [ + 71, + 734, + 491, + 881 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We broadly categorize deep tabular methods into three types: specialized methods, transferable methods, and general methods, distinguished by the scope of datasets on which they are trained and deployed, as well as their corresponding", + "bbox": [ + 71, + 883, + 491, + 944 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "generalization capabilities (illustrated in Figure 2). Specialized tabular methods align closely with classical supervised models, typically trained and evaluated on data drawn from the same distribution. In contrast, transferable methods leverage knowledge from models pre-trained on one or multiple source datasets, subsequently fine-tuning these models on target datasets; the primary challenge here lies in addressing the heterogeneity between pre-trained sources and target tasks. The recently proposed general tabular methods—motivated by the remarkable \"zero-shot\" generalization abilities demonstrated by large language models (LLMs)—exhibit exceptional versatility. These general models can directly apply their learned representations to downstream tabular datasets without additional fine-tuning, achieving robust generalization due to advanced pre-training strategies. Although the generalization ability tends to increase from specialized to general models, it does not imply that specialized or transferable methods are less valuable; specialized models remain superior on large-scale datasets, and fine-tuning general models can further improve their predictive performance. 
Additionally, the first two types of methods provide foundational insights and valuable components that contribute significantly to advancements in general tabular models.", + "bbox": [ + 501, + 452, + 926, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For specialized methods, numerous designs have been proposed from diverse perspectives, and previous papers have often categorized these methods based primarily on their architectural characteristics or behaviors. Existing taxonomies [44], for example, group specialized methods into feature-preprocessing-based [33], [45], data-augmentation-based [46], [47], [48], [49], MLP variants [50], [34], specialized DNN architectures [51], [52], [53], [54], [55], [56], [57], [58], tree-mimic approaches [59], [60], [61], token-based tech", + "bbox": [ + 503, + 811, + 924, + 944 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 32, + 923, + 42 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "niques [62], [63], [33], [64], [65], regularization-driven methods [66], [67], and neighborhood-based strategies [68], [69], [35]. However, such categorizations can appear scattered, making it difficult to connect the core ideas between methods placed in distinct groups. In contrast, this survey introduces a hierarchical taxonomy based on the key aspects of tabular data—features, samples, and objectives—providing a cohesive organizational framework. Our approach emphasizes detailed strategies for obtaining high-quality representations at both feature- and sample-levels. 
This unified perspective helps bridge core ideas across diverse methods, facilitating clearer comparative discussions and potentially guiding the design of future, more advanced tabular models.", + "bbox": [ + 71, + 51, + 491, + 241 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instead of training a model from scratch on a single tabular dataset, transferable models leverage knowledge encoded in a pre-trained model from another dataset, which can significantly enhance the training process, especially when data or computational resources for the target task are limited. A major challenge in transferring knowledge across tabular tasks lies in the inherent heterogeneity between the source and target datasets, particularly differences in their feature and label spaces. In this survey, we adopt a broad perspective on transferable tabular models, categorizing methods based on the sources of their pre-trained knowledge. Specifically, we discuss models pre-trained on homogeneous tabular domains, such as self-supervised methods with additional pre-training steps on the target dataset itself [70], [71]; models pre-trained across heterogeneous tabular domains [72], [73], [64]; and methods transferring knowledge from other modalities, such as vision-based pre-trained models [74], [75], [76]. Additionally, since incorporating attribute semantics (when available) is a common strategy for bridging heterogeneous attribute spaces across tabular datasets [77], [78], [79], we also explore approaches leveraging language models in the final category. 
In particular, we further organize these language model-based strategies according to the methods they use to extract knowledge and the types of language models involved—ranging from small-scale language models to Large Language Models (LLMs) [80], [81], [82], [83].", + "bbox": [ + 76, + 243, + 491, + 621 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inspired by recent advancements in foundation models from vision and language domains [84], [85], general models—also known as tabular foundation models—expand the concept of transferable tabular models by enabling direct application to downstream tasks without additional fine-tuning. This capability, commonly referred to as the model's \"zero-shot\" ability, significantly enhances the model's usability across diverse tabular datasets. In contrast to transferable models, which primarily focus on bridging knowledge gaps between source and target datasets, general models aim to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. We categorize these general models based on the strategies used to achieve adaptiveness across diverse tabular tasks, specifically examining adaptations from both data-centric [86] and model-centric perspectives [87], [88]. Furthermore, we discuss critical branches of general tabular models in detail: the TabPFN variants leveraging in-context learning [89], [90], [91], and methods utilizing attribute and task semantics to unify heterogeneous tasks within a common representation framework [92], [93], [94].", + "bbox": [ + 71, + 621, + 491, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Additionally, ensemble methods [95], [96], [91] are in", + "bbox": [ + 96, + 926, + 491, + 941 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "produced, which improve the generalization ability based on the strengths of multiple tabular models. 
Finally, we briefly overview other relevant extensions of tabular learning, including clustering [97], [98], anomaly detection [99], [100], [101], data generation and imputation [102], [103], [104], interpretability [63], [105], [61], multimodal learning [106], [107], open-environment tabular machine learning [108], [109], [110], [111], and tabular understanding [112], [113]. By summarizing the state of the field and identifying open challenges, we aim to guide future research and applications in tabular data representation learning.", + "bbox": [ + 503, + 53, + 924, + 214 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 504, + 233, + 653, + 247 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section presents the (supervised) tabular machine learning task, including the notation of tabular data learning, the history of tabular data, the challenges of learning from tabular data, evaluation metrics, and tabular benchmarks.", + "bbox": [ + 503, + 252, + 923, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Learning with Tabular Data", + "text_level": 1, + "bbox": [ + 504, + 329, + 745, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A supervised tabular dataset is formatted as $N$ examples and $d$ features/attributes corresponding to $N$ rows and $d$ columns in the table. An instance $\\pmb{x}_i\\in \\mathbb{R}^d$ is depicted by its $d$ feature values. Assume $x_{i,j}$ as the $j$ -th feature of instance $\\pmb{x}_i$ , it could be a numerical (continuous) one $x_{i,j}^{\\mathrm{num}}\\in \\mathbb{R}$ , like the temperature of a region or the density of the object. $\\pmb{x}_i$ can also be a categorical (discrete) value $x_{i,j}^{\\mathrm{cat}}$ , like one of multiple colors, the location of a person, or even some textual descriptions of the instance. 
Each instance is associated with a label $y_i$ , where $y_i\\in \\{1, - 1\\}$ in a binary classification task, $y_i\\in [C] = \\{1,\\dots ,C\\}$ in a multi-class classification task, and $y_i\\in \\mathbb{R}$ in a regression task.", + "bbox": [ + 503, + 347, + 923, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Remark 1. Ordinal regression [114], [115], also called ordinal classification, is a type of regression analysis used to predict an ordinal variable. It can be considered an intermediate problem between regression and classification. However, this survey primarily focuses on standard classification and regression tasks and does not specifically discuss ordinal regression.", + "bbox": [ + 504, + 525, + 924, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a tabular dataset $\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N$ , we aim to learn a mapping $f$ on $\\mathcal{D}$ that maps $x_i$ to its label $y_i$ . In other words, the model predicts $x_i$ with $\\hat{y}_i = f(x_i)$ . The general objective learning $f$ follows the structural risk minimization:", + "bbox": [ + 503, + 633, + 923, + 693 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {f} \\sum_ {\\left(\\boldsymbol {x} _ {i}, y _ {i}\\right) \\in \\mathcal {D}} \\ell (y, \\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}\\right)) + \\Omega (f). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 699, + 923, + 732 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$\\ell (\\cdot ,\\cdot)$ measures the discrepancy between the predicted label $\\hat{y}_i$ and the true label $y_{i},e.g.$ , cross-entropy in classification and mean square error in regression. $\\Omega (\\cdot)$ is the regularization on the model, which restricts the complexity of $f$ . 
We expect the learned $f$ is able to extend its ability to unseen instances sampled from the same distribution as $\\mathcal{D}$ .", + "bbox": [ + 503, + 737, + 921, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Tabular methods differ in their strategies to implement $f$ . The \"dummy\" approach makes predictions based on training labels $\\{y_i\\}_{i=1}^N$ directly, which outputs the major class in the training set for classification and the average of all labels for regression, respectively.", + "bbox": [ + 503, + 825, + 923, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In a $C$ -class classification task, classical parametric methods implement $f$ with a linear mapping, i.e., $f(\\pmb{x}_i) = \\pmb{W}^\\top \\pmb{x}_i + \\pmb{b}$ , where the classifier $\\pmb{W} \\in \\mathbb{R}^{d \\times C}$ and $\\pmb{b} \\in \\mathbb{R}^C$", + "bbox": [ + 503, + 898, + 924, + 941 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "is the bias. With different loss functions, we can implement Logistic Regression, SVM, or even AdaBoost. In contrast, non-parametric methods implement the prediction via $f(\\pmb{x}_i) = f(\\pmb{x}_i, \\mathcal{D})$ , depending on the whole training set. For example, KNN searches neighbors in the training set $\\mathcal{D}$ with the $K$ smallest distance w.r.t. $\\pmb{x}_i$ . KNN can be viewed as a specific label smoother, with a dynamic local region for every instance. [116] links KNN and Random Forest from their ways of smoothing training labels in their predictions.", + "bbox": [ + 71, + 53, + 491, + 184 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Deep tabular methods implement $f$ with a deep neural network, e.g. 
Most deep learning models could be decomposed into two parts, i.e., $f(\\pmb{x}_i) = \\pmb{W}^\\top \\phi(\\pmb{x}_i) + \\pmb{b}$ . Similar to the linear model, $\\pmb{W}$ and $\\pmb{b}$ are the components of linear classifier, with $\\pmb{W} \\in \\mathbb{R}^{d' \\times C}$ . $\\phi$ maps the input vector $\\pmb{x}_i$ into the $d'$ dimension space, which extracts semantic embeddings for the given tabular input. $\\phi$ could be implemented with MLP or residual network.", + "bbox": [ + 71, + 185, + 491, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 History of Tabular Data", + "text_level": 1, + "bbox": [ + 73, + 323, + 285, + 338 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Historically, classical machine learning tasks were predominantly formulated with tabular data, or datasets readily transformed into a tabular representation without explicitly designating them as \"tabular.\" In early literature, the term \"tabular\" typically referred to tables within relational databases [117], CSV files on the web [118], or tables in documents [119]. Relevant tasks included table extraction [120], parsing [121], understanding [122], and discovering association rules [123]. With the expansion of machine learning applications into other modalities such as images, texts, audio, and video, the classical vector-based data representations have come to be explicitly termed \"tabular data.\"", + "bbox": [ + 71, + 343, + 496, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Early statistical approaches such as linear regression, logistic regression, linear discriminant analysis, and K-Nearest Neighbors (KNN) predate artificial intelligence. Classical learning methods further expanded across various paradigms, including decision trees [124], [125], multi-layer perceptrons (MLPs), support vector machines (SVMs), and nearest centroid classifiers [5], [14]. 
Ensemble methods enhanced predictive performance by aggregating outputs from multiple base learners [126], [127]. More recently, gradient boosting frameworks [128], [129], such as XGBoost [130], LightGBM [131], and CatBoost [132], have become prominent due to their effectiveness and efficiency in tabular data applications and competitions [133], [134].", + "bbox": [ + 71, + 518, + 491, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the development of deep learning, DNNs were applied to tabular classification and regression tasks decades ago, utilizing architectures such as stacked Restricted Boltzmann Machines and denoising autoencoders [135], [136], [137]. Early representation learning efforts primarily focused on dimensionality reduction and data visualization tasks [23], [24], [25], [26], yet these models struggled to surpass traditional tree-based methods in terms of generalization. However, advancements in neural network architectures and representation learning strategies have recently led to promising results in related tabular domains, including click-through rate prediction [27], [28], anomaly detection [138], [29], recommendation systems [139], [30], and time series forecasting [31], [140], [32], [141]. Innovations such as convolutional layers and learnable feature embeddings have improved the ability of deep models to capture high-order", + "bbox": [ + 71, + 709, + 495, + 944 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "attribute relationships [142], [143]. 
While early deep tabular methods lagged behind ensemble tree-based models, recent techniques have demonstrated competitive or superior performance [33], [34], [35], affirming deep representation learning as a promising direction for tabular data modeling.", + "bbox": [ + 503, + 53, + 924, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While several survey papers have been published [9], [144], the field of tabular data has witnessed remarkable progress over the past two years. On one hand, the emergence of new specialized methods has introduced significant shifts in the landscape, motivating the need for our comprehensive taxonomy. On the other hand, the rise of transferable and general approaches has greatly enhanced the generality and applicability of tabular data modeling, which has been overlooked in previous works.", + "bbox": [ + 503, + 126, + 926, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Challenges of Learning from Tabular Data", + "text_level": 1, + "bbox": [ + 504, + 280, + 854, + 294 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Different from other types of data sources, e.g., images and texts, there exist several challenges dealing with tabular datasets due to their characteristics.", + "bbox": [ + 503, + 299, + 923, + 342 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Heterogeneity of Features. Unlike continuous image data or token-based textual data, tabular datasets often contain both numerical and categorical attributes, each requiring distinct handling methods [9], [145]. Numerical features frequently exhibit varying ranges and distributions, necessitating normalization or scaling. Categorical features differ in cardinality and semantic interpretation, requiring encoding methods like one-hot vectors or embeddings. 
Consequently, tabular models must carefully handle these mixed data types to preserve the usability of each feature.", + "bbox": [ + 503, + 343, + 924, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Lack of Spatial Relationships. Tabular data inherently lacks spatial or sequential relationships that are naturally found in other modalities [74], [50]. The order of columns has no semantic or spatial meaning, making tabular data permutation-invariant regarding features. Moreover, standard tabular machine learning assumes rows are independently and identically distributed (i.i.d.), further eliminating temporal or sequential correlations present in data such as video or time series. This absence of inherent spatial or sequential structure challenges deep learning architectures traditionally designed to exploit such dependencies.", + "bbox": [ + 503, + 489, + 924, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Low-quality and Missing Data. Compared to image or text data, where contextual or spatial redundancies help manage missing or corrupted values, tabular data is more vulnerable to incomplete or erroneous entries [146], [147]. Missing values in tabular datasets can introduce significant biases and degrade prediction quality. Additionally, noisy or incorrect values can considerably affect model reliability. Data preprocessing steps, including data cleaning and imputation, become crucial to maintaining accuracy and robustness in tabular machine learning.", + "bbox": [ + 503, + 650, + 924, + 796 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Importance of Feature Engineering. Effective tabular models heavily depend on the quality of their input features [45], [148]. Unlike image or textual data, where DNNs inherently learn feature representations from raw data, tabular methods often require domain-specific knowledge and meticulous manual feature engineering. 
Identifying and modeling complex, nonlinear interactions among tabular features frequently demands sophisticated transformations and expert insight, significantly impacting the predictive performance of models [149].", + "bbox": [ + 503, + 796, + 924, + 944 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Class Imbalance. Tabular datasets frequently exhibit imbalanced label distributions, especially in classification tasks, where certain categories are underrepresented [150], [151]. Class imbalance complicates model learning, leading to biased outcomes toward majority classes and poor performance on minority classes. Specialized methods such as oversampling, undersampling, or tailored loss functions (e.g., focal loss [152]) are required to address this imbalance effectively. Evaluation criteria like the AUC or F1-score further help assess model quality in imbalanced settings. Recent research highlights differences between deep and classical models in handling imbalance, emphasizing the need for careful consideration [153], [154], [155], [41].", + "bbox": [ + 71, + 53, + 491, + 243 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark 2. Class imbalance has long been a known issue in the tabular domain, even before the rise of deep learning [156], and methods such as SMOTE [157], [158] can easily be extended to deep learning methods during preprocessing. However, Current deep tabular methods primarily assume that the training and testing data come from the same distribution, even in cases involving class imbalance. In addition, some class imbalance methods in visual domain can be readily extended to the tabular data learning [159], [160]. 
Therefore, we do not delve into class imbalance in this survey.", + "bbox": [ + 73, + 247, + 491, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Scalability to Large Datasets. Tabular datasets can become large-scale and high-dimensional, presenting computational and generalization challenges [161]. With increasing dimensionality, the risk of overfitting increases, especially when the number of features significantly surpasses the number of samples. Consequently, efficient training algorithms, memory management strategies, and sufficient computational resources become essential. Effectively scaling tabular models to handle large datasets while maintaining generalization ability remains a challenging but critical research area [162]. Model Selection and Hyperparameter Tuning. Tabular models are particularly sensitive to hyperparameter settings [163], [164]. Selecting an appropriate model architecture and tuning hyperparameters, such as learning rate, layer depth, or number of trees, can be computationally expensive and time-consuming. Despite the advancement of automated machine learning (AutoML) techniques [165], [166], [167], efficiently identifying optimal configurations for deep tabular methods under practical constraints remains challenging and critical for achieving high predictive performance.", + "bbox": [ + 71, + 417, + 491, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Domain-Specific Constraints. Certain application domains, such as healthcare or finance, impose additional regulatory or ethical requirements on model development [168]. For example, healthcare applications must comply with privacy standards like HIPAA [169] and provide explainability to clinicians. Financial models similarly must adhere to fairness regulations and industry standards. 
These constraints can restrict algorithm selection, necessitate interpretable predictions, and require additional validation, explainability, and auditability procedures [170], [171], [172].", + "bbox": [ + 71, + 710, + 491, + 857 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.4 Evaluation of a Tabular Method", + "text_level": 1, + "bbox": [ + 73, + 877, + 344, + 892 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We present the evaluation of tabular methods, ranging from traditional to modern, to provide a comprehensive evaluation across different aspects. For a given model on a dataset $\\mathcal{D}$ ,", + "bbox": [ + 71, + 898, + 491, + 944 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "we employ standard metrics that quantify the discrepancy between the predicted label $\\hat{y}_i$ and the true label $y_i$ .", + "bbox": [ + 501, + 53, + 921, + 82 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation on A Single Task. For classification tasks, Accuracy (or Error Rate) is commonly employed as the primary metric. AUC and F1 scores are further used to address imbalanced label distributions, while Expected Calibration Error (ECE) [173], [174] calculates the weighted average error of the estimated probabilities. All criteria are the higher, the better, except the error rate and ECE. For regression tasks, common metrics include Mean Squared Error (MSE), Mean Absolute Error (MAE), and Root Mean Squared Error (RMSE), with MAE and RMSE sharing the scale of the original labels. Lower values denote superior performance. Additionally, the coefficient of determination $(\\mathbb{R}^2)$ is employed, with higher values indicating a better fit.", + "bbox": [ + 501, + 83, + 924, + 271 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In tabular machine learning, the diversity of datasets makes it difficult for any single model to consistently excel across all scenarios. 
Therefore, evaluating models requires not only assessing their performance on individual datasets but also employing aggregated metrics that capture their overall effectiveness across multiple datasets.",
        "bbox": [
            501,
            272,
            924,
            358
        ],
        "page_idx": 4
    },
    {
        "type": "text",
        "text": "Evaluation on A Set of Tasks. Early research predominantly relied on Average Rank (Friedman Rank) [12], [39], often used in conjunction with Critical Difference Comparisons, to evaluate model performance across multiple datasets. Models are ranked per dataset based on a chosen metric (e.g., accuracy, AUC, RMSE), and the average rank is computed across datasets. To ensure statistical robustness, hypothesis tests were employed to assess the significance of ranking differences, providing a more reliable comparative analysis. For multiple comparisons, tests such as the Wilcoxon-Holm, Friedman, and Nemenyi tests are employed [175]. To address the potential degradation of average rank by poor performance on some datasets, the Probability of Achieving the Maximum Accuracy (PAMA) [12] is defined as the fraction of datasets in which a model attains the highest accuracy. An alternative to PAMA accounts for near-optimal performance: $P95$ quantifies the likelihood of a model attaining at least $95\%$ of the maximum accuracy, which is computed as the ratio of datasets where the classifier achieves at least $95\%$ of the maximum accuracy to the total number of datasets.",
        "bbox": [
            501,
            359,
            924,
            650
        ],
        "page_idx": 4
    },
    {
        "type": "text",
        "text": "As research progressed, more diverse evaluation metrics were introduced. The Arithmetic Mean of a chosen metric provides a direct comparison across datasets, but variations in the scales of evaluation metrics across datasets can distort results. 
To mitigate this issue, performance metrics are often normalized before aggregation, with normalized Accuracy applied to classification tasks and normalized RMSE (nRMSE) used for regression [36], [34]. Depending on the evaluation framework, Mean Normalized Error can be used, but its dependence on normalization can hinder independent optimization. To further address these limitations, the Shifted Geometric Mean (SGM) error was introduced, which aggregates errors multiplicatively, reducing sensitivity to extreme values and ensuring more stable cross-datasets/splits comparisons [34].", + "bbox": [ + 501, + 651, + 924, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Beyond absolute performance, relative comparisons are also important. The Relative Improvement metric quantifies a model's performance gain over a baseline (e.g., a simple MLP), offering insight into efficiency relative to simpler alternatives [176]. More recently, drawing inspiration from the ELO rating system[177], [178], ELO-based evaluation has", + "bbox": [ + 501, + 854, + 924, + 944 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "been introduced [179], modeling model-to-model comparisons as pairwise competitions across datasets. 
The ELO Score iteratively adjusts rankings based on relative performance, providing a more dynamic, fine-grained assessment.", + "bbox": [ + 71, + 53, + 491, + 113 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5 Tabular Benchmarks and Datasets", + "text_level": 1, + "bbox": [ + 73, + 132, + 369, + 145 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This section introduces existing benchmarks and datasets, along with associated considerations for constructing the benchmarks and evaluation protocols.", + "bbox": [ + 71, + 151, + 491, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5.1 Popular Tabular Benchmarks and Datasets", + "text_level": 1, + "bbox": [ + 73, + 209, + 423, + 223 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first introduce several benchmarks based on raw features constructed from various aspects. Then, we present datasets with rich semantics, following some tabular toolboxes and evaluation protocols.", + "bbox": [ + 71, + 227, + 488, + 285 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Standard Benchmarks. Methods for tabular data have preferences depending on the dataset, and evaluating them on limited datasets can be easily influenced by randomness or other factors. Therefore, it's important to consider various aspects to ensure a more comprehensive and reliable benchmark evaluation.", + "bbox": [ + 71, + 286, + 491, + 372 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A comprehensive benchmark should cover a diverse set of datasets to test the model's generalization capabilities across different tasks and feature types. The benchmark should include datasets from different task types, including binary classification, multi-class classification, and regression tasks. [12] evaluates 179 classifiers across 17 families on 121 datasets, concluding that Random Forest variants were the most likely to perform best overall. 
[50] explores MLPs with parameterized techniques, such as ensembling and data augmentation, over 40 classification datasets. Similarly, [33] demonstrates the effectiveness of MLPs, ResNets, and transformer-based models on 11 datasets. [36] conducts experiments on 45 datasets, investigating the differences between tree-based and DNN-based methods.", + "bbox": [ + 76, + 373, + 491, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The benchmark should cover datasets with varying sizes, including datasets with a large number of samples and features as well as smaller datasets. The diversity of dataset sizes helps evaluate the scalability and efficiency of different models. [39] includes 176 classification datasets and evaluate 19 methods, comprising 8 classical and 11 deep methods. In this study, the pre-trained TabPFN model [89] emerges as the top performer on average, even when limited to randomly sampled training sets of 3000 examples. However, limited trials for hyperparameter tuning and strict time constraints in [39] may have led to suboptimal evaluations for some deep tabular methods [180].", + "bbox": [ + 71, + 578, + 491, + 752 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To ensure robustness and generalization, datasets from multiple domains should be included. Common domains for tabular data include healthcare, biology, finance, education, and physics. Additionally, some datasets are derived from other domains, such as image or speech data, by feature extraction. [181] evaluates attention mechanisms and contrastive learning methods across 28 tabular datasets, comparing their performance with traditional deep learning and machine learning approaches. [44], with a particular focus on DNN-based models, uses a benchmark of over 300 tabular datasets spanning a wide range of task types, sizes, and domains. 
A more diverse collection allows us to assess whether a tabular method can generalize across applications.", + "bbox": [ + 71, + 752, + 493, + 944 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Semantic-Enriched Datasets. In addition, recent research has also focused on evaluating tabular data with rich semantics, such as incorporating meta information related to tasks or integrating attribute names. UniTabE [182] introduces a 7TB dataset containing 13 billion tabular examples for tabular pre-training, covering domains with investing, time series analysis, finance, economics, and with numerical, categorical, text data types. CM2 [79] proposes OpenTabs for crosstab pre-training, which contains an extensive collection of large-scale tables with column name semantics, including approximately 46M tabular samples. TP-BERTa [78] filters the OpenTabs for datasets with at least 10,000 samples and no more than 32 features, resulting in 101 binary classification datasets and 101 regression datasets with about 10 million samples. GTL [81] curates a collection of 384 public tabular datasets from Kaggle, which includes 176 classification and 208 regression tasks spanning a wide range of industrial domains. TabLib collects a set of 627M tables totaling 69TiB, along with 867B tokens of context [183]. TabLib was extracted from numerous file formats, including CSV, HTML, SQLite, PDF, Excel, and others, sourced from GitHub and Common Crawl. T4 (The Tremendous Tablib Trawl) [92] takes account of the inscrutable statistics and call sheets with personally identifiable information in TabLib and filters TabLib into a collection of 4M tables with 2.1B rows.", + "bbox": [ + 501, + 53, + 924, + 417 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Among these benchmarks and datasets, the semantic-rich ones are primarily used for pre-training LLMs on tabular data, while the others are mainly employed for evaluating standard methods. 
Besides, some toolboxes implement methods over tabular data, including those for classical methods, as well as those for deep tabular methods [184], [185], [186], [187], [188]. To establish a comprehensive tabular benchmark, several factors need to be considered, including the range of datasets and data quality.",
        "bbox": [
            503,
            417,
            926,
            550
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "Remark 3. Recent studies have proposed alternative perspectives for tabular evaluations, such as focusing on dataset age [42], leveraging expert-level feature engineering [43], and considering dataset version [44]. Studies have also highlighted generalization in open-world environments in tabular datasets [43], [109], where the distributions of training, validation, and test sets differ significantly. More discussions are in Section 9. Incorporating diverse, high-quality datasets helps build a reliable benchmark for meaningful model comparisons.",
        "bbox": [
            504,
            553,
            926,
            700
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "2.5.2 Evaluation Protocols",
        "text_level": 1,
        "bbox": [
            504,
            719,
            702,
            732
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "Given the strong sensitivity of tabular methods to data and the additional randomness in deep methods, robust evaluation is essential. Furthermore, due to the high computational cost of some methods, it is equally important to ensure evaluation efficiency.",
        "bbox": [
            501,
            737,
            924,
            811
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "Model Selection. Model selection on the validation set involves both hyperparameter tuning and early stopping, which are essential for reliable evaluation. Due to the large number of hyperparameters in deep methods, automated methods like Optuna [189] are commonly used to explore hyperparameters through multiple trials [33], [69]. 
During tuning, models are evaluated on the validation split, while models can also be trained with multiple random seeds, providing more reliable evaluations. In each trial and the", + "bbox": [ + 501, + 811, + 926, + 944 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "final training, early stopping [190] often employed to prevent overfitting, and the epoch with the best validation performance is selected as the final model.", + "bbox": [ + 71, + 53, + 491, + 97 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Performance Evaluation. To assess generalization and prevent overfitting, models are typically evaluated using separate train/val/test splits, with a typical split ratio of $64\\% / 16\\% / 20\\%$ . However, such fixed splits may yield inconsistent results. With the rise of deep learning, researchers have proposed more robust evaluation protocols to better reflect model capabilities [191]. Two main approaches are commonly used: (1) fixing the data split and running multiple trials with different random seeds [54], [59], [105], [69], [62], [87], [33], [58], [192], [65], [71]; and (2) using cross-validation, where new train/val/test splits are generated in each fold [63], [89], [193], [68], [34]. A hybrid strategy combining both random seeds and cross-validation is also adopted [194].", + "bbox": [ + 71, + 97, + 491, + 301 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Recent studies show that holdout-based hyperparameter tuning can be unstable and prone to overfitting to the validation set [195], [180]. [180] found it ineffective on most TabZilla [39] datasets and instead used 5-fold cross-validation for more robust hyperparameter selection. 
As a result, they found the key meta-feature findings reported in [39] no longer held. This observation was also discussed in [44], which further identified meta-features that have a greater impact on model performance. For small datasets, alternative strategies have been proposed [196], [197], [198]. However, this approach significantly reduces the efficiency of hyperparameter search. [199] showed that simply reshuffling data splits can often improve generalization, making holdout selection competitive with cross-validation while remaining more computationally efficient.", + "bbox": [ + 71, + 301, + 491, + 521 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3 FROM CLASSICAL TO DEEP METHOD", + "text_level": 1, + "bbox": [ + 73, + 544, + 405, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We present possible advantages of deep learning for tabular data, as well as the potential challenges of deep learning when compared with tree-based methods.", + "bbox": [ + 71, + 564, + 490, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1 Advantages of deep representation learning", + "text_level": 1, + "bbox": [ + 71, + 630, + 439, + 645 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Deep tabular models offer several advantages beyond performance when compared with classical methods.", + "bbox": [ + 71, + 650, + 491, + 679 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ability to Model Complex Feature Interactions. DNNs are particularly adept at capturing high-order, non-linear interactions between features, which may be challenging for traditional models like linear regression or decision trees [51], [54]. By learning a hierarchical representation of features, DNNs allow low-level feature interactions to be captured in the initial layers, while higher-order interactions are identified in deeper layers. 
This ability to automatically learn complex relationships makes DNNs highly effective in capturing intricate dependencies within tabular data.", + "bbox": [ + 71, + 680, + 491, + 824 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "End-to-End Learning. Unlike traditional machine learning methods, which often involve separate steps for feature engineering, preprocessing, and model tuning, DNNs can process raw features and automatically extract useful representations without complex manual transformations. This end-to-end learning approach reduces human bias and simplifies the workflow, making the process more efficient. DNNs are trained through gradient optimization, enabling", + "bbox": [ + 71, + 825, + 491, + 944 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "a unified, streamlined solution for complex tasks [33], [107]. Additionally, deep models support multi-task learning, allowing related tasks to benefit from shared representations, enhancing both performance and efficiency [200], [70], [49]. Integration with Other Modalities. Deep tabular methods excel in multi-modal pipelines, where tabular data is integrated with other modalities, such as images, audio, or text. In AI4science applications, for instance, tabular data might be combined with image data [106], [107] (e.g., in medical imaging applications) or time-series data [201], [202] (e.g., in forecasting tasks). DNNs are well-suited to model interactions between heterogeneous data types, improving the overall performance. By jointly learning from multiple data sources, DNNs enhance their ability to make more accurate and comprehensive predictions across domains.", + "bbox": [ + 501, + 53, + 924, + 271 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Flexibility with Dynamic Environments. DNN-based methods benefit from the flexibility of gradient-based optimization, which allows efficient and iterative training. 
This flexibility makes DNNs adaptable to changing objectives without significant modifications, unlike tree-based models that often require specialized methods for different tasks [9]. Moreover, DNNs excel in dynamic environments, such as real-time predictions, financial analysis, and decision-making systems, where feature relationships may shift. This adaptability makes them suitable for online learning or incremental training, where new data is continuously integrated without retraining from scratch [203], [204].", + "bbox": [ + 503, + 271, + 926, + 445 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Long-Term Knowledge Transfer and Learning. DNNs are capable of long-term learning and knowledge transfer, which allows them to retain valuable knowledge gained from training on diverse tasks [205]. Once trained on a broad set of tasks, DNNs can transfer this knowledge to related domains, reducing the need for complete retraining [206]. This is especially advantageous in fields like AI4science, where a model trained on one type of scientific data can be adapted to other related domains, saving both time and computational resources. This ability to transfer knowledge across tasks is a key advantage of deep learning, enabling more efficient use of data and model capabilities over time.", + "bbox": [ + 503, + 446, + 924, + 621 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 Debates between Tree-Based Methods and DNNs", + "text_level": 1, + "bbox": [ + 504, + 633, + 908, + 647 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Although deep tabular methods have shown great potential in learning semantic representations and constructing nonlinear predictors, their initial performance often struggles to surpass that of classical tree-based ensemble methods, such as Gradient Boosted Decision Trees (GBDT). 
Many studies still treat GBDT approaches as strong baselines [36], [39], and in some cases, the advantages of deep tabular methods diminish as the number of evaluation datasets increases.", + "bbox": [ + 501, + 651, + 924, + 767 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Several reasons contribute to why tree-based methods retain their advantages over DNNs in many tabular tasks:", + "bbox": [ + 503, + 768, + 924, + 796 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Better Handling of High-Frequency Data. Tree-based methods, particularly GBDT models, are highly efficient at handling high-frequency data or dense datasets with many small variations [38]. These models build decision trees by recursively splitting the data at the most informative feature points, capturing both local and global patterns efficiently. DNNs, on the other hand, may not capture fine-grained patterns as effectively without extensive regularization or tuning [207], [208]. To address this limitation, [38] introduced frequency reduction as an inductive bias through", + "bbox": [ + 501, + 797, + 924, + 944 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the addition of scaling layers, while [45] demonstrated that periodic activation functions can significantly enhance neural networks' ability to learn high-frequency functions.", + "bbox": [ + 71, + 51, + 491, + 98 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Natural Handling of Mixed Data Types. Tabular data often includes a combination of numerical, categorical, and ordinal features [9], [44], [209]. 
Tree-based models are particularly strong when working with mixed data types, as they can handle categorical features directly without requiring one-hot encoding or embeddings. This ability to work with raw categorical data simplifies the preprocessing pipeline significantly. DNNs, however, generally require encoding techniques (e.g., one-hot encoding or learned embeddings) for categorical features, adding complexity and potentially leading to suboptimal performance [63].", + "bbox": [ + 71, + 104, + 491, + 267 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lower Computational Requirements for Training and Inference. For certain tasks, tree-based models tend to be more computationally efficient than DNNs [33]. GBDTs and other decision tree-based models can train relatively quickly and are less computationally intensive than deep neural networks [210], [39]. This is especially true when the dataset is not massive or when the model needs to be trained and deployed rapidly. DNNs, on the other hand, often require significant computational resources (e.g., GPUs, longer training times) to achieve comparable performance, making them less ideal in resource-constrained environments [211], [88].", + "bbox": [ + 71, + 273, + 491, + 435 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Robustness to Noisy and Missing Data. Tree-based models are generally more robust to noisy data and missing values. When training a decision tree, missing values can be handled through optimal splitting that accommodates absent data, and trees can effectively deal with noisy or inconsistent data points [36]. DNNs, in contrast, are more sensitive to noise and often require careful preprocessing or specific techniques (e.g., data imputation or noise filtering) to avoid performance degradation with noisy or missing data [65], [89].", + "bbox": [ + 71, + 443, + 491, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Interpretability and Transparency. 
Tree-based models offer a significant advantage in terms of interpretability [60], [61], [105]. The decision-making process of models like GBDT can be easily visualized in the form of decision paths, and feature importance can be directly extracted [130], [132], [131]. This transparency makes tree-based models appealing in domains where model explainability is crucial, such as in finance, healthcare, and regulated industries. Although interpretability techniques like LIME [212] and SHAP [213] exist for DNNs, tree-based models still tend to be more intuitive and easier to explain, especially in complex decision-making environments. Recent works [214], [60], [59], [193] have sought to bridge this gap by enhancing neural network interpretability through emulation of tree-based model behaviors.", + "bbox": [ + 71, + 584, + 493, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Handling Outliers and Skewed Data. Tree-based methods are often better at handling outliers and skewed distributions in the data. When a feature exhibits extreme values or skewed distributions, decision trees are inherently less sensitive to such anomalies because they create splits based on feature ranges that naturally isolate outliers. This characteristic can make them more robust than DNNs, which may require specialized loss functions or techniques (e.g., robust scaling or outlier removal) to handle such data points [43], [109].", + "bbox": [ + 71, + 810, + 491, + 944 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 TAXONOMY OF SPECIALIZED METHODS", + "text_level": 1, + "bbox": [ + 504, + 51, + 856, + 66 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Similar to the evolution of deep learning, which progresses from specialized learning to transfer learning and ultimately to foundation models [244], we categorize deep tabular methods into three groups, as shown in Figure 2: specialized methods, transferable methods, and general methods. 
This classification reflects both the evolutionary development of deep learning techniques and the increasing generalization capabilities of these models.", + "bbox": [ + 503, + 71, + 923, + 186 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Specialized methods, being the earliest developed and most widely used category, will be our starting point for discussion. Tabular data consists of features (columns), samples (rows), and objectives (labels), which together define the structure and the task objectives. We emphasize detailed strategies for obtaining high-quality representations at both feature- and sample-level for the target task. Specifically, given the input data, according to the general learning objective in Equation 1, we consider how to transform the tabular input $x_{i}$ (feature aspect), how to construct relationships between samples (sample aspect), how to design the objective $\\ell(\\cdot)$ and regularize $\\Omega(\\cdot)$ (objective aspect). In particular,", + "bbox": [ + 503, + 186, + 924, + 362 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Feature Aspect. We focus on how to transform the raw tabular input (in various forms) into intermediate representations. We consider two types of features: numerical and categorical. By explicitly modeling the relationships between the two features (e.g., feature importance and interactions), we are able to enhance the model's understanding of the input space.", + "bbox": [ + 504, + 363, + 924, + 464 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Sample Aspect. In addition to features, we explore how to retrieve and utilize neighboring samples to capture intersample dependencies, thereby improving predictions. 
In order to improve the model's ability to make predictions, we explore the relationships between a target sample and its \"extracted neighbors.\"", + "bbox": [ + 504, + 464, + 924, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Objective Aspect. We examine how to modify the loss function and overall objective to introduce inductive biases. By directly guiding the learning process with the target variables, we incorporate prior knowledge or task-specific preferences into the model, thereby improving its generalizability and interpretability.", + "bbox": [ + 504, + 551, + 924, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In specialized methods, we focus solely on learning from pure data, excluding feature semantics considered in transferable methods (in Section 6), as they leverage the capabilities of language models. Since specialized methods encompass a wide range of approaches, and feature-aspect methods are the most extensive part of them, we will first introduce sample-aspect methods and objective-aspect methods in the following subsections. In Section 5, we will provide a detailed introduction to feature-aspect methods.", + "bbox": [ + 503, + 638, + 923, + 771 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Sample-aspect Specialized Methods", + "text_level": 1, + "bbox": [ + 504, + 785, + 815, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Sample interaction methods take a retrieval-based approach, focusing on relationships between individual samples rather than features. 
In a tabular dataset, each sample $x_{i}$ represents a row with $d$ features, and the goal is to leverage relationships between a target sample and its \"extracted neighbors\" to improve predictions.", + "bbox": [ + 501, + 803, + 923, + 891 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The general form for the sample interaction methods can be expressed as:", + "bbox": [ + 503, + 891, + 923, + 920 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {i} = f \\left(\\mathcal {R} \\left(\\boldsymbol {x} _ {i}, \\mathcal {D}; \\Phi\\right)\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 635, + 926, + 923, + 944 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/172234d4f3ca90801d6fb35295f332435c76d1756e9f1a75e41d12052939df1d.jpg", + "table_caption": [ + "Table 1: The taxonomy of representation learning for tabular data. The shade color in the last column denotes the subcategory, which is consistent with Figure 3." + ], + "table_footnote": [], + "table_body": "
Algorithm CategoryReference
Specialized Methods§ 5Feature-aspect MethodsFeature Encoding[33], [45], [64]
Feature Selection[59], [60], [105], [61], [193]
Feature Projection[52], [33], [34], [58]
Feature Interaction[54], [62], [63], [55], [65], [49], [215]
§ 4.1Sample-aspect MethodsSample Interaction[70], [216], [217], [192], [67]
Neighbor Retrieval[218], [68], [69], [35]
§ 4.2Objective-aspect MethodsTraining Objective[67]
Training Regularization[219], [50], [66]
§ 6Transferable MethodsHomogeneous[63], [48], [70], [220], [46], [221], [222], [223], [47], [224], [225], [226], [227]
Heterogeneous[228], [229], [222], [72], [73], [64], [230], [231]
Language Model[77], [232], [182], [79], [78], [233], [234], [82], [83], [235], [236], [80], [237]
Vision Model[238], [239], [240], [74], [75], [241], [242], [76]
§ 7General MehtodsRaw-Feature-based[86], [87], [88]
TabPFN Variants[89], [91]
Semantics-based[92], [93], [94], [243]
", + "bbox": [ + 125, + 88, + 867, + 327 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg", + "image_caption": [ + "Figure 3: The roadmap of deep representation learning tabular methods. We organize representative methods chronologically to show the concentration at different stages. Different colors of these methods denote the sub-categories." + ], + "image_footnote": [], + "bbox": [ + 81, + 333, + 908, + 705 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\mathcal{D}$ is the set of all samples (training data) available for retrieval or learning. $\\mathcal{R}(\\cdot)$ is the sample interaction module, which retrieves or aggregates information from relevant samples in $S$ for the target sample $\\boldsymbol{x}_i$ . $\\Phi$ represents the learnable parameters of $\\mathcal{R}$ . $f(\\cdot)$ is the prediction head that maps the aggregated information to the final output $\\hat{y}_i$ .", + "bbox": [ + 71, + 772, + 490, + 863 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Sample aspect approaches can be broadly categorized into two main strategies. The first approach introduces the modeling of sample relationships $\\mathcal{R}$ during representation training, allowing the model to learn better representations by capturing inter-sample dependencies. The second ap", + "bbox": [ + 71, + 869, + 491, + 944 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "proach is retrieval-based models, which directly predict outcomes by learning how to retrieve and utilize neighbors' relationships $\\mathcal{R}$ when testing.", + "bbox": [ + 501, + 772, + 923, + 818 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Sample Interaction. These methods assist in representation learning by allowing the model to capture relationships between samples, which in turn helps generate a more robust representation during training. 
During testing, the model becomes more sensitive to each sample without interaction.", + "bbox": [ + 501, + 821, + 921, + 895 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "SAINT [70] introduces inter-sample attention beyond inter-attribute attention, which improves row classification by relating each row to others in the table. NPT [216]", + "bbox": [ + 503, + 898, + 921, + 943 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 410, + 44 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "extends this via non-parametric Transformers, whereas Hopular [217] employs Hopfield networks, sharing conceptual alignment with SAINT [70]. Unlike nearest-neighbor classification, the distance metric is learned end-to-end. Prompt [192] posits that the feature importance in tabular data is sample-dependent. During feature extraction, it treats the information between samples as prompts. PTaRL [67] identifies two issues in the representation of tabular data samples: entanglement and localization. It addresses these by modeling global sample relationships through prototype generation and representation projection, helping the model produce clear and consistent decisions.", + "bbox": [ + 71, + 53, + 491, + 227 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Neighbor Retrieval. These methods construct high-quality contexts to aid prediction by retrieving valuable neighbors and designing efficient ways to utilize them based on the relationships between samples. 
The training data is used to assist during testing.", + "bbox": [ + 71, + 227, + 491, + 301 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "DNNR [68] argues that a key advantage of neighbor-based methods is the model's transparency, meaning that the model's decisions can be explained by inspecting its components. It enhances predictive performance by incorporating local gradient estimation and Taylor series approximation into the KNN framework. TabR [69] proposes that, compared to purely parametric (e.g., retrieval-free) models, retrieval-based models can achieve superior performance while also exhibiting several practically important properties, such as the ability for incremental learning and enhanced robustness. It encodes all candidate samples and then employs an attention-like mechanism to retrieve the samples that aid in making predictions, as explored in [218]. ModernNCA [35] revitalizes the classic tabular prediction method, Neighbourhood Component Analysis (NCA) [245], by designing and incorporating deep learning architectures and strategies. The resulting method efficiently leverages neighboring samples for prediction.", + "bbox": [ + 76, + 301, + 493, + 564 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Remark 4. The neighborhood-based approach closely resembles", + "bbox": [ + 73, + 566, + 493, + 580 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "bles the current in-context learning [246] mechanism. In particular, the in-context learning used in general models like TabPFN [89], [91] can aslo be considered a form of the neighborhood method. This concept of neighborhood not only helps in standard tasks, but also enhances transferable and general methods. 
For example, LoCalPFN [90] highlights that employing local linear regression can lead to more expressive decision boundaries, while utilizing local context allows performance to scale with the size of the training dataset.", + "bbox": [ + 94, + 580, + 493, + 728 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 Objective-aspect Specialized Methods", + "text_level": 1, + "bbox": [ + 73, + 748, + 398, + 763 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The general objective learning $f$ follows the structural risk minimization as in Equation 1, where $\\ell$ is the loss function to set the training objective between the prediction and the ground truth label. $\\Omega(\\cdot)$ is the regularization on the model, which directs the objective or restricts the complexity of $f$ .", + "bbox": [ + 71, + 766, + 491, + 840 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In traditional machine learning, models often rely on explicit regularization techniques on $\\Omega$ to ensure good generalization. Methods such as decision trees, support vector machines, and linear models typically incorporate regularization terms directly into the loss function to control model complexity and prevent overfitting. For example, in linear regression, regularization methods like L1 (Lasso) [247], L2", + "bbox": [ + 71, + 840, + 491, + 944 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 55, + 720, + 133 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg", + "image_caption": [ + "Figure 4: Illustration of feature-aspect methods, including feature encoding, feature selection, feature projection and feature interaction." 
+ ], + "image_footnote": [], + "bbox": [ + 540, + 148, + 669, + 227 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 745, + 55, + 916, + 130 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 148, + 903, + 233 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(Ridge) [248], or Elastic-Nets [249] penalize large coefficients, effectively controlling the complexity of the model and helping to maintain a balance between bias and variance.", + "bbox": [ + 501, + 304, + 923, + 349 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Objective-aspect methods in deep learning are an extension of these traditional regularization techniques, where inductive bias is introduced by adjusting the loss function $\\ell$ or adding regularizers $\\Omega$ . In the training process, the goal is to leverage regularization on the model to improve predictions.", + "bbox": [ + 503, + 351, + 924, + 439 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Remark 5. Pre-train methods such as homogeneous transferable tabular methods in Section 6 also change the loss function $\\ell$ or the regularization $\\Omega$ to help pre-training. We will discuss these methods later.", + "bbox": [ + 504, + 455, + 924, + 513 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Objective-aspect approaches can be broadly categorized into two main strategies. The first approach involves training objectives, which enhance the model with a specialized ability. 
The second approach introduces a regularizer, allowing the model to learn strong generalized representations.", + "bbox": [ + 503, + 542, + 923, + 617 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training Objective. For training objectives, PTaRL [67] constructs prototype-based projection space and learns the disentangled representation around global prototypes. PTaRL uses a diversification constraint for representation calibration and introduces a matrix orthogonalization constraint to ensure the independence of global prototypes.", + "bbox": [ + 503, + 618, + 923, + 707 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training Regularization. For training regularization, RLNs [219] overcome the challenge of an intractable number of hyperparameters during training by introducing an efficient tuning scheme, which minimizes a new \"Counterfactual Loss.\" In RLNs, the regularization coefficients are optimized together with learning the network weight parameters. RLNs produce extremely sparse networks, thus providing more interpretable models and revealing the importance that the network assigns to different inputs. [50] introduces \"cocktails,\" dataset-specific combinations of 13 regularization techniques, showing that even simple neural networks can outperform tree-based architectures when optimized with these methods. TANGOS [66] introduces a regularization-based improvement. It regularizes neuron attributions to encourage neurons to specialize and become orthogonal to one another.", + "bbox": [ + 501, + 708, + 924, + 941 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 FEATURE-ASPECT SPECIALIZED METHODS", + "text_level": 1, + "bbox": [ + 73, + 51, + 452, + 66 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Tabular data is characterized by a diverse set of features, including both categorical and numerical variables. The complexity of tabular data arises from the variety of feature types, their interrelationships, and the high dimensionality often present. Traditional methods often rely on manual feature engineering, using techniques such as encoding categorical variables and selecting relevant features to improve model performance and reduce overfitting.", + "bbox": [ + 71, + 71, + 491, + 188 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As deep learning has evolved, these traditional techniques have been integrated and expanded upon. Deep tabular models are capable of automatically learning complex feature representations, reducing the need for explicit feature engineering. Feature-aspect methods, such as feature encoding, selection, projection, and interaction, are essential for transforming raw tabular inputs into more informative intermediate forms. These methods help improve a model's ability to capture intricate relationships between features, thereby enhancing its generalization capabilities.", + "bbox": [ + 71, + 189, + 491, + 335 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.1 Feature Encoding", + "text_level": 1, + "bbox": [ + 73, + 354, + 246, + 369 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Various encoding strategies have been explored for both categorical and numerical features in tabular data. 
Additionally, with the advancement of the attention mechanism, feature tokenization, similar to word embeddings in natural language processing, transforms all features into embeddings.", + "bbox": [ + 71, + 372, + 491, + 446 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Categorical Encoding. Categorical variables represent types of data which may be divided into groups. Examples of categorical variables are race, sex, age group, and educational level [250]. The categorical features are usually transformed in an index (integer). The two most popular techniques are an Ordinal Encoding and a One-Hot Encoding.", + "bbox": [ + 71, + 446, + 491, + 532 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Ordinal Encoding assigns each unique category a distinct integer value. This approach is useful when the categorical variable has an inherent order, such as \"low,\" \"medium,\" and \"high.\" The main advantage of Ordinal Encoding is its simplicity and efficiency, as it transforms the categorical variable into a single numeric column. However, it assumes that there is an ordinal relationship between the categories, which may not always be the case. For instance, if the categorical variable represents \"color\" with categories such as \"red,\" \"blue,\" and \"green,\" applying Ordinal Encoding would introduce an artificial order that does not reflect any meaningful ranking.", + "bbox": [ + 71, + 534, + 491, + 709 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "On the other hand, One-Hot Encoding creates a new binary column for each unique category in the original categorical variable. For example, for a variable \"color\" with three categories (red, blue, and green), One-Hot Encoding would generate three binary columns: \"is_red,\" \"is_green,\" and \"is_green,\" encoding red as $(1,0,0)$ , blue as $(0,1,0)$ and green as $(0,0,1)$ . Each column indicates the presence or absence of that particular category. 
One-Hot Encoding is useful for nominal categorical variables, where no order exists between the categories. While One-Hot Encoding avoids the assumption of ordinal relationships, it can lead to a high-dimensional feature space if the categorical variable has many unique values, which may result in increased computational costs and potential issues with overfitting.", + "bbox": [ + 71, + 709, + 491, + 912 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In some cases, more advanced encoding techniques are used to address the limitations of these basic approaches.", + "bbox": [ + 73, + 912, + 491, + 943 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "For example, Target Encoding assigns each category a value based on the mean of the target variable for that category. This method can be useful when there is a strong relationship between the categorical features and the target. In Leave-one-out embedding, every category is replaced with the mean of the target variable of that category, which excludes the current row to avoid overfitting.", + "bbox": [ + 501, + 53, + 924, + 155 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Numerical Encoding. For encoding, MLP-PLR [45] introduces two numerical encoding methods: Piecewise Linear Encoding (PLE) and Periodic Activation Functions. These encoding methods can be integrated with other differentiable layers (e.g., Linear, ReLU) to enhance performance. PLE produces alternative initial representations for the original scalar values and is based on feature binning. Periodic Activation Functions take into account the fact the embedding framework where all features are computed independently of each other forbids mixing features during the embedding process and train the pre-activation coefficients instead of keeping them fixed. 
[38] utilizes tools from spectral analysis, showing that functions described by tabular datasets often have high irregularity, and can be smoothed by transformations such as scaling and ranking to improve performance. They propose \"frequency reduction\" as an inductive bias during training.", + "bbox": [ + 501, + 155, + 924, + 388 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Feature Tokenization. Feature tokenizer performs a similar role to the feature extractor in traditional models. It transforms the input features to embeddings [62], [33]. Since the feature representations of features are very sparse and high-dimensional, a common way is to represent them into low-dimensional spaces (e.g., word embeddings).", + "bbox": [ + 503, + 388, + 924, + 477 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The general form for feature tokenization can be expressed as:", + "bbox": [ + 503, + 477, + 924, + 506 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {T} _ {i, j} = \\boldsymbol {b} _ {j} + \\mathcal {T} \\left(x _ {i, j}; \\Psi\\right) \\in \\mathbb {R} ^ {t}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 507, + 921, + 525 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\mathcal{T}(\\cdot)$ is the feature tokenizer module, which transforms the input feature vector $\\pmb{x}_i\\in \\mathbb{R}^d$ to a token embedding $T_{i,j}\\in \\mathbb{R}^t$ . $t$ is the dimension of token embedding. $\\pmb{b}_{j}$ is the $j$ -th feature bias. $\\mathcal{T}$ can be implemented with different forms. $\\Psi$ represents the learnable parameters of $\\mathcal{T}$", + "bbox": [ + 503, + 532, + 924, + 606 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In AutoInt [62], both the categorical and numerical features are embedded into low-dimensional spaces, which reduces the dimension of the input features and meanwhile allows different types of features to interact with each other. 
The embeddings of categorical features are computed by multiplying the embedding matrix with the multi-hot vector, while a corresponding embedding vector represents numerical features. TabTransformer [63] embed each categorical feature into a parametric embedding of dimension $t$ using Column embedding. An embedding vector is assigned to each feature, and a set of embeddings is constructed for all categorical features. Unlike TabTransformer, SAINT [70] proposes projecting numerical features into a $t$ -dimensional space before passing their embedding through the transformer encoder. FT-Transformer [33] adapts the Transformer architecture for tabular data, where all features are transformed to embeddings and applies a stack of Transformer layers to the embeddings. Specifically, the numerical tokenizer is implemented as the element-wise multiplication $\\boldsymbol{T}_i^{\\mathrm{num}} = \\boldsymbol{b}_i^{\\mathrm{num}} + x_i^{\\mathrm{num}} \\cdot \\boldsymbol{W}_i^{\\mathrm{num}}$ , and the categorical tokenizer is implemented as the lookup table $\\boldsymbol{T}_i^{\\mathrm{cat}} = \\boldsymbol{b}_i^{\\mathrm{cat}} + \\boldsymbol{e}_i^T \\boldsymbol{W}_i^{\\mathrm{cat}}$ , where $\\boldsymbol{e}_i^T$ is a one-hot vector for the corresponding categorical feature. Other transformer-based", + "bbox": [ + 501, + 607, + 924, + 944 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "methods, like [65], [72], [230], [215], use the same feature tokenizer as FT-Transformer.", + "bbox": [ + 71, + 51, + 491, + 82 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.2 Feature Selection", + "text_level": 1, + "bbox": [ + 73, + 99, + 246, + 114 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The high dimensionality of tabular data often causes overfitting, where the model focuses on irrelevant features and neglects the important ones. Feature selection reduces the number of features, retaining only the most valuable information. This helps prevent overfitting, improves generalization, and reduces computational complexity.", + "bbox": [ + 71, + 117, + 491, + 205 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Traditional tree-based models facilitate automatic feature selection by evaluating the impact of each feature on the target during the construction process. Decision trees utilize metrics such as information gain or the Gini index for feature selection, while ensemble methods like random forests determine feature importance by assessing each feature's contribution [251], [252], [253]. Recently, modern deep learning methods for tabular data often mimic trees' structures for feature selection.", + "bbox": [ + 71, + 207, + 491, + 335 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "GrowNet [59] and NODE [60] primarily mimic ensemble techniques. Inspired by GBDT, GrowNet designs a framework for building DNNs with multiple weak learners, where each learner's input consists of the original features plus the penultimate layer output from the previous learner. 
NODE uses a differentiable Oblivious Decision Tree as the base model, applying Bagging within each layer and Stacking across layers in a multi-layered structure. To make GAM [254] scalable and effective, NODE-GAM [61] modifies NODE to be a GAM, allowing GAM to learn quick, nonlinear jumps that better match patterns in real data.", + "bbox": [ + 71, + 337, + 491, + 496 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TabNet [105] and GRANDE [193] focus more on how tree models handle features. TabNet not only retains the representation learning capabilities of DNNs through self-supervised learning, but also incorporates the interpretability of tree models and the benefits of sparse feature selection, with a model structure designed for both feature selection and computation. GRANDE argues that the hard splits used by tree models are a key advantage over deep models, and thus proposes a method for learning hard, axis-aligned tree ensembles using gradient descent. GRANDE combines the beneficial inductive bias of axis-aligned splits with the flexibility provided by gradient descent optimization.", + "bbox": [ + 71, + 497, + 491, + 674 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.3 Feature Projection", + "text_level": 1, + "bbox": [ + 73, + 690, + 251, + 705 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Feature projection methods aim to project the raw data into a middle form, enhancing the representation ability for later architectures. Feature projection methods can be broadly categorized into two main approaches: MLP variants and special designed architectures. These approaches aim to enhance the model's ability to represent complex features for underlying feature structures.", + "bbox": [ + 71, + 709, + 490, + 811 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "MLP Variants. 
For model architecture, RTDL [33] investigates both ResNet-like and Transformer-based architectures tailored for tabular data, proposing simple yet effective adaptations of these widely-used deep models. In particular, the MLP architecture is constructed by stacking multiple blocks consisting of Linear layers, ReLU activations, and Dropout, which transform the raw tabular features into a fixed-dimensional hidden representation. A final linear layer is then used as the classification head. The paper highlights", + "bbox": [ + 71, + 811, + 491, + 944 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "an important insight: with proper hyperparameter tuning, even simple architectures like MLP and ResNet can achieve competitive performance on tabular benchmarks.", + "bbox": [ + 503, + 53, + 923, + 97 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Another contemporaneous work [50] enhances the MLP architecture by equipping it with a comprehensive suite of modern regularization techniques. Instead of introducing architectural innovations, this study focuses on systematically exploring combinations of 13 different regularization methods to identify an effective \"regularization cocktail\" for plain MLPs. The results demonstrate two key findings: (i) a well-regularized vanilla MLP can significantly outperform many recent, specialized neural architectures designed for tabular data; and (ii) such MLPs can even surpass strong traditional machine learning models like XGBoost across a range of benchmarks. For a more comprehensive strategy, RealMLP [34] explores multiple aspects including preprocessing, hyperparameters, architecture, regularization, and initialization.", + "bbox": [ + 503, + 97, + 924, + 314 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Special Designed Architectures. 
For units, motivated by the observation that normalization techniques are prone to disturbances during training, SNN [52] proposes the Scaled Exponential Linear Unit (SELU) to improve deep models for tabular data. NAMs [255] uses exp-centered (ExU) hidden units to improve the learnability for fitting jumpy functions. BiSHop [58] uses a dual-component approach, sequentially processing data both column-wise and row-wise through two interconnected directional learning modules. They use layers of generalized sparse modern Hopfield layers, a sparse extension of the modern Hopfield model with learnable sparsity.", + "bbox": [ + 503, + 315, + 926, + 491 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.4 Feature Interaction", + "text_level": 1, + "bbox": [ + 504, + 508, + 687, + 522 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Feature interaction methods aim to model relationships among features to enhance the representation power of deep learning models on tabular data. 
In tabular datasets, each sample $\\boldsymbol{x}_i \\in \\mathbb{R}^d$ is described by $d$ features, and the goal is to transform these raw features into enriched representations that improve predictive performance.", + "bbox": [ + 503, + 525, + 923, + 613 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The general form for feature interaction methods can be expressed as:", + "bbox": [ + 503, + 614, + 923, + 642 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {i} = f \\left(\\mathcal {H} \\left(\\boldsymbol {x} _ {i}; \\Theta\\right)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 643, + 642, + 923, + 659 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $\\pmb{x}_i\\in \\mathbb{R}^d$ is the input feature vector for a single instance, $\\mathcal{H}(\\cdot)$ is the feature interaction module, which transforms the input $\\pmb{x}$ by capturing feature dependencies or generating higher-order feature interactions. $\\Theta$ represents the learnable parameters of $\\mathcal{H}$ . $f(\\cdot)$ is the prediction head that maps the transformed representation to the final output $\\hat{y}$ .", + "bbox": [ + 503, + 665, + 921, + 753 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Feature interaction methods can be broadly categorized into two main approaches: the design of automatic feature interaction modules and the mining of implicit feature relationships. These approaches aim to enhance the model's ability to learn complex feature interactions and underlying feature structures within tabular data.", + "bbox": [ + 503, + 753, + 921, + 839 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Automatic Feature Interaction Modules. These methods do not assume specific feature types within the tabular dataset. 
Instead, they focus on improving the feature interaction process, enabling the model to learn complex, high-order feature relationships autonomously.", + "bbox": [ + 503, + 840, + 921, + 912 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "DCNv2 [54] improves the learning of the model's feature interaction by improving the \"Cross Network\" structure. It", + "bbox": [ + 503, + 912, + 923, + 941 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "employs low-rank methods to approximate feature crosses in subspaces and then integrates these subspaces using a gating mechanism. AutoInt [62] maps the original sparse high-dimensional feature vectors into a low-dimensional space and models high-order feature interactions by stacking interaction layers with a multi-head attention mechanism. Unlike AutoInt, the TabTransformer[63] only maps categorical features into contextual embeddings and feeds them into a Transformer model, while numerical continuous features are directly concatenated with the interacted contextual embeddings. When tabular data contains only numerical features, TabTransformer behaves in an MLP-like manner. Conversely, when the data contains only categorical features, TabTransformer operates similarly to AutoInt.", + "bbox": [ + 71, + 53, + 491, + 257 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Implicit Feature Relationships. 
Methods in this category typically assume that features in tabular data can be abstracted into implicit types and that it is necessary to design a suitable feature learning process to adapt to the characteristics of different types of features.", + "bbox": [ + 71, + 257, + 491, + 330 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "DANets [55] propose the existence of underlying feature groups in tabular data, where features within each group are correlated. They learn to group input features and perform further feature abstraction. SwitchTab [49] introduces the idea of extracting sample-specific \"Salient Features\" and sample-shared \"Mutual Information\" in tabular features. It leverages self-supervised learning to assist in learning feature representations. ExcelFormer [65] argues that while DNN assigns weights to each feature, it does not actively exclude irrelevant features. To address this, it introduces Semi-Permeable Attention for feature interaction, which allows features with lower information content to access information from more informative features while preventing highly informative features from being influenced by less relevant ones. AMFormer [215] proposes the hypothesis that arithmetic feature interactions are crucial for deep tabular models. Based on the Transformer architecture, it introduces components designed to extract both additive and multiplicative interaction information.", + "bbox": [ + 76, + 330, + 491, + 608 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6 FROM SPECIALIZED TO TRANSFERABLE MODEL", + "text_level": 1, + "bbox": [ + 73, + 628, + 488, + 643 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Instead of training a tabular model from scratch, learning based on a Pre-Trained Model (PTM) may increase the learning efficacy and reduce the resource and data requirement. 
For example, in a house price prediction task, training a regressor in a certain area may benefit from a well-trained predictor from its neighborhood.
For example, those pre-trained from other domains [71], or those unlabeled instances [48], [70].", + "bbox": [ + 503, + 82, + 921, + 157 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Heterogeneous Transferable Tabular Model. In addition, we consider a PTM pre-trained from a slightly different task with $\\mathcal{D}$ . In addition to the previous difference, the PTM $g_{\\Theta}$ may differ from $f_{\\theta}$ in feature dimension $(d' \\neq d)$ or target class set $(C' \\neq C)$ , so the adaptation method $\\mathbf{Adapt}(\\cdot)$ must handle such heterogeneity [64], [230].", + "bbox": [ + 501, + 156, + 923, + 243 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Cross-Modal Transferable Tabular Model. Moreover, the pre-trained model could also be constructed from another modality, such as vision and language domains. The cross-modality PTM is hard to be applied to the tabular prediction task in most cases, so auxiliary information from the tabular task like the semantic meaning of attributes (i.e., the attribute names) are usually assumed to be available in this case, where PTM like large language models may provide the latent semantic meanings as external knowledge [77], [73].", + "bbox": [ + 501, + 243, + 924, + 375 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The main limitation of the transferable tabular model is the assumption that the data distribution of the well-trained model should be similar to the distribution of the target model. For example in the previous house price prediction task, if the PTM is pre-trained in an area distance from the target area and targets diverse problems, it is hard to utilize the PTM in the target task [222]. Since different tabular tasks may vary in their distribution, feature, or classes, the general assumption is their exist a common \"dimension\" between the PTM and the target task. 
Only the distribution changes under the shared dimension and classes, or there exists an overlap between the feature or class spaces [230]. For example, in real-world applications such as healthcare, there are numerous medical diagnostic tables. These tables usually have some features in common such as blood type and blood pressure. For rare diseases with limited data, knowledge transfer from other diagnostic tables with overlapping features becomes beneficial [228]. When the feature/label semantics are available, two different tasks may be linked through the semantic space, and textual PTMs can be used to map the tabular instance to this space or facilitate the prediction in this space [80].", + "bbox": [ + 501, + 375, + 924, + 696 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Pros and Cons of transferable Models. Learning with a well-trained tabular model has several advantages based on the knowledge encoded in the PTM. First, the training efficiency of the target model is improved and the model may converge fast, as the PTM may provide better initialization weights or optimization paths. Then, the target model will reduce the requirement on the data size, i.e., learning with a few-shot dataset. Training based on a PTM also reduces the number of learnable parameters, leading to parameter-efficient tuning and reducing computational resources.", + "bbox": [ + 503, + 696, + 924, + 843 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.1 Homogeneous Transferable Tabular Model", + "text_level": 1, + "bbox": [ + 503, + 863, + 859, + 878 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Adapting a tabular model from another domain with different distributions is investigated in the field of unsupervised domain adaptation before the era of deep learning. 
One representative method is the biased regularization, which", + "bbox": [ + 501, + 883, + 924, + 944 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg", + "image_caption": [ + "Figure 5: Illustration of homogeneous transferable tabular methods. The pre-trained model could be constructed from supervised learning or self-supervised learning, which includes masked language model, contrastive pre-training, and hybrid methods." + ], + "image_footnote": [], + "bbox": [ + 83, + 61, + 480, + 196 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "minimizes the difference between the weights of the PTM and the target model, i.e.,", + "bbox": [ + 71, + 290, + 491, + 321 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\boldsymbol {W}} \\ell (\\boldsymbol {W}) + \\| \\boldsymbol {W} - \\boldsymbol {W} ^ {\\prime} \\| _ {F} ^ {2} = \\min _ {\\Delta \\boldsymbol {W}} \\ell \\left(\\Delta \\boldsymbol {W} + \\boldsymbol {W} ^ {\\prime}\\right) + \\| \\Delta \\boldsymbol {W} \\| _ {F} ^ {2}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 73, + 325, + 491, + 359 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$\\ell(W)$ is the loss function on the current weights $W'$ , and the regularize constraint the distance between the target model $W$ and the PTM weights $W'$ . We can reformulate the learning objective as learning the weights residual $\\Delta W$ . Biased regularization can be extended to the case where $f$ and $g$ are deep neural networks such as MLP, but it fails when the target model has a different architecture with the PTM. 
In this case, instead of matching two models through their weights, matching their predictions also helps. For example, twice learning [253] and knowledge distillation [257].", + "bbox": [ + 71, + 359, + 491, + 505 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Benefiting from the strong capacity of deep neural networks, some recent studies focus on pre-training a tabular model from unsupervised instances, and then adapting the model via fine-tuning the PTM on the target (even few-shot) labeled examples. This strategy could be applied in standard supervised learning or semi-supervised learning.", + "bbox": [ + 71, + 506, + 491, + 593 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Supervised Pre-training Objectives. A straightforward way to incorporate the target variable into the pre-training is by using the input corruption as an augmentation for the standard supervised learning objective. [71] identifies practices to pre-train tabular deep learning models that can be universally applied to different datasets and architectures. They show that using the object target labels during the pre-training stage benefits the downstream performance and advocates several target-aware pre-training objectives.", + "bbox": [ + 71, + 593, + 491, + 724 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Self-Supervised Pre-training Objectives. The self-supervised pre-training objectives can be mainly categorized into three categories, including the masked language model, contrastive pre-training, and hybrid methods.", + "bbox": [ + 71, + 724, + 491, + 782 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Masked Language Model (MLM). MLM is the unsupervised pre-training objective, where a random subset of features is masked for each sample, and the masked values are predicted in a multi-target classification manner [63]. 
VIME [48] estimates mask vectors from corrupted tabular data and reconstructs feature vectors for self-supervised learning. They use the trained encoder to generate multiple augmented samples for each data point by masking each point using several different masks and then imputing the corrupted values for each masked data point. SubTab [46] finds that reconstructing the data from the subset of its features rather", + "bbox": [ + 71, + 782, + 491, + 941 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "than its corrupted version in an autoencoder setting can better capture its underlying latent representation. SEFS [221] reconstructs the original input based on a randomly selected subset of input features, and simultaneously estimates the gate vector that defines which features are selected or not. MET [223] uses a concatenation of representations for all features instead of averaging and uses adversarial reconstruction loss in addition to the standard loss.", + "bbox": [ + 503, + 53, + 924, + 170 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Contrastive Pre-training. Contrastive pre-training uses data augmentations to generate positive pairs or two different augmented views of a given example, and the loss function encourages a feature extractor to map positive pairs to similar features. The key factor in contrastive learning is to generate positive and negative versions of a given instance $x_{i}$ . [70] utilizes CutMix [258] in the input space and Mixup [259] in the embedding space to obtain positive pairs, where other instances $x_{j \\neq i}$ are treated as negative ones. SCARF [47] generates a view for a given input by selecting a random subset of its features and replacing them with random draws from their respective empirical marginal distributions. STab [224] relies on two (or multiple) weight-sharing neural networks with different regularizations applied to a single input. 
By exploiting the stop-gradient operation technique, STab can model invariance with respect to more complicated regularizations while it will not collapse to an undesired trivial solution. DoRA [226] incorporates domain knowledge, training by intra-sample pretext task and inter-sample contrastive learning to learn contextualized representations. DACL+ [220], to overcome the reliance on a particular domain, uses Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels.", + "bbox": [ + 501, + 191, + 926, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Hybrid Methods. [222] explores several pre-training strategies including both supervised and unsupervised ones. It considers MLM as the unsupervised pre-training objective, and sets multi-label classification as the supervised pre-training objective. By fine-tuning the PTM with several choices, including those with frozen feature extractor or not, the paper observes that supervised pre-training leads to more transferable features in the tabular domain. LFR [227] conducts pretraining by learning to simultaneously reconstruct multiple randomly generated projection functions. It considers diverse data types to show the wide-ranging applicability of learning from randomness, including tabular, vision, and language. ReConTab [225] utilizes both self-supervised learning and semi-supervised learning. It uses regularization techniques for raw feature selection and leverages contrastive learning with labels to distill the most pertinent information for downstream tasks. [71] focuses on the setup with fully labeled tabular datasets to understand if pretraining helps tabular deep learning in a fully supervised setting and compares pretraining methods to the strong supervised baselines. 
They show that using the object target labels during the pre-training stage is beneficial for the downstream performance and advocate several target-aware pretraining objectives.
One main assumption is that there exists a shared set of features between the pre-trained task $\\mathcal{D}'$ and the target task $\\mathcal{D}$ , then we may directly copy the weights corresponding to the shared features from the PTM. Some methods extend bias regularization to deal with heterogeneous feature spaces by padding the weights with zero. OPID [260] is a one-pass learning approach, which only needs to scan each instance once and to deal with evolving streams. In the pre-training stage, OPID compresses important information of vanished features into functions of survived features, and in the adaptation stage, it is expanded to include the augmented features. ReForm [261] learns the meta-representation for each feature and based on which calculates the relationship between features in the meta-representation space. ReForm then bridges the feature space gap through optimal transport, which could be further used to transform classifiers with different features and classes.", + "bbox": [ + 71, + 398, + 491, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A major advantage of neural models is that they are easily fine-tuned in new domains and learn reusable features. For example, as the deep PTM has the ability to extract generalizable features for a tabular task, reusing the knowledge from the PTM can utilize the strategies designed for visual and language domains. In detail, we can fix most of the parameters in the PTM and tune the remaining parts which only have limited parameters, for example, the linear probing or parameter-efficient fine-tuning.", + "bbox": [ + 71, + 662, + 491, + 796 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Reuse PTM Pre-trained from One Dataset. These methods primarily focus on the difference between the pre-trained and down-streaming datasets. TabRet [72] utilizes masked autoencoding to make the transformer work in downstream tasks. 
To transfer pre-trained large language models to tabular tasks, ORCA [73] trains an embedder to align the source and target distributions. TabToken [64] focuses on improving the quality of the feature tokens, which are an important component in tabular deep models. TabToken leverages a conditional contrastive loss to improve the", + "bbox": [ + 71, + 796, + 491, + 944 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "quality of learned embeddings and demonstrates enhanced transferability of deep learning models for tabular data.", + "bbox": [ + 503, + 53, + 921, + 82 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Pseudo-Feature method [222] utilizes pseudo-feature models individually for each new feature. In detail, given one additional feature in a downstream dataset, it first pretrains a model on the upstream data without that feature. Then Pseudo-Feature fine-tunes the pre-trained model on downstream data to predict values in the column absent from the upstream data. Next, the fine-tuned model is used back in the upstream datasets to predict and assign pseudo-values of this feature. After supplementing the upstream dataset with the \"unseen\" feature in the downstream task, PseudoFeature pre-trains and transfers the feature extractor to the downstream task again. This method is computationally expensive in our broader feature space adaptation scenario. Reuse PTM Pre-trained from Multiple Datasets. XTab [230] aims to enhance the transferability of the transformer. They address the challenge of inconsistent column types and quantities among tables by utilizing independent features and federated learning to pre-train the shared component.", + "bbox": [ + 501, + 82, + 921, + 345 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Another thread of method learns shared components such as attribute-agnostic transformation across datasets, which provides a good model initialization for partial parameters given a downstream task. 
[228] infers latent representations of each attribute and each response from a few labeled instances using an inference network. The attribute and response representations are enabled make predictions based on the task-specific properties of attributes and responses even when attribute and response sizes are different across tasks. DEN [229] uses a three-block architecture: a covariate transformation block followed by a distribution embedding block and then a classification block. They provide theoretical insights to show that this architecture allows the embedding and classification blocks to be fixed after pre-training on a diverse set of tasks. Meta-Transformer [231] leverages a frozen encoder to perform multimodal perception without any paired multimodal training data. In Meta-Transformer, the raw input data from various modalities are mapped into a shared space in meta learning [262], allowing a subsequent encoder with frozen parameters to extract high-level semantic features.", + "bbox": [ + 501, + 345, + 923, + 650 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6.3 Reusing a Pre-trained Language Model", + "text_level": 1, + "bbox": [ + 503, + 671, + 834, + 686 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In some cases, the semantic meaning of features is available, making it natural to leverage pre-trained language models for tabular data. Typically, two types of semantic information can be derived from a tabular dataset $\\mathcal{D}$ . First, attribute names for each of the $d$ features, $\\mathcal{A} = A_{1},\\ldots ,A_{d}$ , provide useful context. Additionally, meta-information such as a textual description, denoted as meta_description, can further enhance understanding. 
The learning process is then formulated as:", + "bbox": [ + 501, + 690, + 921, + 808 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}, \\mathcal {A} \\mid \\mathcal {D}, \\text {m e t a} _ {\\text {d e s c r i p t}}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 816, + 923, + 833 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where the semantic information bridges the gap between feature spaces and facilitates knowledge transfer from pretrained tasks to downstream applications.", + "bbox": [ + 501, + 840, + 924, + 883 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Although pre-trained language models have demonstrated success in various domains, their application to tabular data remains limited due to the prevalence of numerical values and the scarcity of textual descriptions.", + "bbox": [ + 501, + 883, + 924, + 944 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 411, + 44 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg", + "image_caption": [ + "Figure 7: Illustration of transferable tabular methods with a language model. The language model can be applied at various stages, including feature tokenization, feature engineering, and textual serialization." + ], + "image_footnote": [], + "bbox": [ + 81, + 55, + 472, + 200 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Moreover, concerns about data privacy and security may further restrict access to semantic information. 
Consequently, language models are typically applied to tabular datasets only when textual context is sufficiently available.", + "bbox": [ + 71, + 281, + 491, + 339 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Language Models for Feature Tokenization. When the feature space changes, language-based methods assume that semantic relationships exist between feature descriptions and rely on large-scale language models to capture these connections. For example, the feature \"occupation\" in one task may share semantic similarity with the feature \"organization\" in another, allowing feature-label relationships to be reused across different datasets. By extracting feature embeddings (tokens), tables of varying sizes can be transformed into a standardized set of tokens in a shared space. A pre-trained transformer then encodes transferable knowledge, aiding the fine-tuning process for downstream tasks.", + "bbox": [ + 71, + 340, + 491, + 516 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "TransTab [77] trains a tokenizer based on the words present in tabular data and incorporates both column descriptions and table cells as raw input to a gated transformer model. The model is pre-trained via self-supervised learning or supervised contrastive loss and is validated on tasks such as transfer learning and feature incremental learning. PTab [232] adopts a similar approach, learning contextual representations from multiple tokenized tabular datasets before fine-tuning for downstream tasks. UniTabE [182] encodes and fuses information from column names, data types, and cell values into a set of tokens, applying an encoder-decoder architecture with Transformer and LSTM components. 
It is pre-trained using Multi-Cell-Masking and contrastive learning, where a sub-vector of an instance is treated as a positive sample while other instances or their subsets are considered negatives.", + "bbox": [ + 71, + 517, + 491, + 750 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "CM2 [79] introduces a cross-table pre-training framework that integrates attribute names and feature values. CM2 uses transformers to process feature tokens and employs a prompt-based Masked Table Modeling (pMTM) self-supervised objective, where column names act as prompts to assist in predicting masked features. TP-BERTa [78] follows a similar approach but incorporates numerical discretization strategies and magnitude tokenization for feature encoding, fine-tuning smaller pre-trained language models such as RoBERTa [263] for tabular data prediction. Its pre-training objective includes supervised loss and magnitude-aware triplet loss as a regularizer.", + "bbox": [ + 71, + 751, + 491, + 926 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "CARTE [233] utilizes a graph representation of tabular", + "bbox": [ + 96, + 926, + 491, + 941 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "data to handle heterogeneous feature spaces, transforming textual information from column names and entries into embeddings. A graph-attentional network is then applied to contextualize entries with column names and neighboring entries. CARTE is pre-trained on the YAGO3 knowledge base [264] by constructing graphlets for tabular data and employing contrastive loss, where the original graphlet and one truncated variant are positives, while other graphlets in the batch serve as negatives. The pre-trained CARTE model is subsequently fine-tuned for downstream tasks.", + "bbox": [ + 501, + 53, + 924, + 198 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Language Models for Feature Engineering. 
Discriminative features enhance the effectiveness of subsequent tabular learning models. Binder [234] identifies task input components that are not directly answerable by a model and leverages LLMs to generate auxiliary features, particularly for knowledge grounding tasks. Given that discriminative features are often manually designed, CAAFE [265] explores the use of LLMs to generate auxiliary features based on task and feature semantics. The quality of these features is then evaluated using a general tabular model, TabPFN [89]. FeatLLM [266] enhances feature generation by incorporating example-based prompting, enabling LLMs to create new features based on textual descriptions. TaPTaP [235] is expected to capture a generic tabular data distribution after ongoing pre-training on a large-scale corpus of real-world tabular data, generating high-quality synthetic tables to support various applications on tabular data.", + "bbox": [ + 503, + 198, + 926, + 445 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Language Models for Textual Serialization. A direct approach to incorporating pre-trained language models involves converting tabular data into a textual format, allowing LLMs to infer relationships between features and labels based on embedded expert knowledge. This concept has been validated in semantic parsing tasks [267], [268]. LIFT [236] and TabLLM [80] serialize tabular data by integrating feature names into text and combining them with task descriptions. This enables LLMs to treat tabular prediction tasks as text generation problems. LIFT fine-tunes models on the entire training set, while TabLLM employs few-shot learning for fine-tuning. UniPredict [237] constructs prompts using metadata, sample serialization, and task instructions, fine-tuning LLMs with confidence-weighted augmented labels predicted by an external model. 
The approach is validated on multiple in-distribution datasets.", + "bbox": [ + 503, + 446, + 924, + 678 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Despite their advantages, textual serialization methods face challenges when the number of features increases, as prompts may become too large to fit within the model's context window. The effectiveness of LLMs in tabular data tasks remains constrained by the availability of semantic information and the capabilities of external tabular models. Further exploration of LLM-based methods will be discussed in the general tabular models in Section 7.", + "bbox": [ + 503, + 678, + 924, + 796 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.4 Reusing a Pre-trained Vision Model", + "text_level": 1, + "bbox": [ + 503, + 806, + 808, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Given the success of deep neural networks (DNNs) in visual tasks, it is intuitive to leverage the strong recognition capabilities of pre-trained vision models for tabular data. Additionally, data augmentation strategies commonly used in image processing can be introduced after transforming tabular data into a visual format. Similar ideas have been explored in time series forecasting [269] and irregular time series classification [270].", + "bbox": [ + 501, + 825, + 924, + 941 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg", + "image_caption": [ + "Figure 8: Illustration of transferable tabular methods with a vision model. Tabular data can be transformed into images through dimensionality reduction, table reorganization, and the use of image markers." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 56, + 486, + 181 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The primary challenge lies in representing tabular instances in an image-compatible format. In natural images, neighboring pixels often share semantic relationships, whereas tabular data lacks inherent spatial structure. Features in a tabular instance are permutation-invariant, meaning that exchanging their order does not alter the instance's meaning. Various methods have been proposed to transform tabular data into visual representations, enabling the application of pre-trained vision models fine-tuned for tabular tasks. This subsection highlights different transformation strategies that transfer tabular datasets into images.", + "bbox": [ + 71, + 266, + 491, + 426 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Dimensionality Reduction Transformation. Visualization strategies for tabular data naturally convert tables into images by embedding high-dimensional features into a lower-dimensional space. DeepInsight [238] projects tabular data into a 2D space using t-SNE and constructs images through convex hull analysis, applying translation, rotation, quantization, and normalization. REFINED [239] employs Bayesian Metric Multidimensional Scaling to preserve pairwise distances within the low-dimensional representation, ensuring that structurally similar features remain proximate in the transformed image.", + "bbox": [ + 71, + 428, + 491, + 589 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table Reorganization Transformation. A tabular dataset $\\mathcal{D}$ can be treated as a matrix and represented as a single-channel image or kernel. To enable visual PTMs to recognize meaningful spatial relationships, different strategies have been developed for structuring tabular data into images. Tabular Convolution (TAC) [240] arranges data samples into zero-mean square matrices (kernels) of odd integer dimensions. 
These kernels are then convolved with a fixed \"base image,\" and the resulting images are subsequently fed to a CNN for classification. Image Generator for Tabular Data (IGTD) [74] and TabEye [75] share a similar idea, generating an image for each data sample where pixel intensities correspond directly to feature values. These methods prioritize placing similar features in close proximity but struggle with high-dimensional tabular tasks. LM-IGTD [241] extends IGTD by incorporating stochastic feature generation to enhance robustness and generalization.", + "bbox": [ + 71, + 590, + 491, + 838 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Image Marker Transformation. Another approach involves encoding feature values as visual markers within an image. Super-TML [242] assigns feature values to predetermined positions within an image, effectively handling categorical and numerical datasets. Tab2Visual [76] normalizes tabular data and represents each instance as a row of multiple bars, each corresponding to a specific value. 
Each feature", + "bbox": [ + 71, + 840, + 491, + 944 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "is assigned a unique color to enhance visual differentiation, while bar widths are proportional to feature magnitudes.", + "bbox": [ + 503, + 53, + 924, + 82 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "By transforming tabular data into images, these methods enable the application of powerful pre-trained vision models to tabular prediction tasks, leveraging established deep learning techniques from the vision domain to enhance tabular model performance.", + "bbox": [ + 503, + 82, + 921, + 157 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "7 FROM TRANSFERABLE TO GENERAL MODEL", + "text_level": 1, + "bbox": [ + 504, + 176, + 897, + 193 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The general model (also referred to as the tabular foundation model) represents an advancement over the transferable model. It extends the generalization capabilities of a pretrained tabular model to a variety of heterogeneous downstream tabular tasks, regardless of their diverse feature and class spaces, without requiring additional fine-tuning. In other words, given a pre-trained model $g_{\\Theta}$ , it can be directly applied to a downstream tabular task $\\mathcal{D}$ to predict the label of a test instance $x^{*}$ as follows:", + "bbox": [ + 501, + 196, + 924, + 329 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {y} ^ {*} = g _ {\\Theta} \\left(\\boldsymbol {x} ^ {*} \\mid \\mathcal {D}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 648, + 338, + 923, + 354 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Thus, the general model shares similarities with the transferable tabular model, but with a greater emphasis on the \"zero-shot\" ability, aims to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. 
Importantly, it does not require an Adapt function, which further reduces the computational cost of hyper-parameter tuning. The goal of the general tabular model is to achieve better generalization on downstream tabular datasets $\\mathcal{D}$ when compared to alternative strategies, such as training a tabular model directly on $\\mathcal{D}$ or adapting a transferable model.", + "bbox": [ + 501, + 362, + 924, + 523 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Remark 6. Distinguishing between an advanced transferable tabular model, pre-trained on a wide range of heterogeneous tabular tasks, and the general tabular model can be challenging. Some transferable tabular models, based on auxiliary feature semantics, are able to predict labels for downstream test instances directly [80]. However, their prediction ability is constrained and typically applicable only in specific areas after fine-tuning [78], [233]. The general tabular model, on the other hand, is designed to handle a wider range of heterogeneous tabular tasks, sharing similar pre-training challenges with transferable models but without utilizing additional semantics. Fine-tuning a pre-trained general model is also an option for further performance improvements [93], [96].", + "bbox": [ + 504, + 526, + 924, + 731 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Pre-training has revolutionized domains such as vision and language [271], [84], but its adoption in tabular data remains limited due to the inherent heterogeneity of tabular datasets. Tabular datasets can vary significantly in both dimensionality (i.e., the number of columns) and the semantic meaning of each dimension, even within the same application. For example, different healthcare datasets may capture varying levels of detail and aspects of patient information. Even within the same feature entry (e.g., the $d$ -th column), the meaning can vary (e.g., \"age\" vs. \"height\"). 
This contrasts with vision and text data (within the same language), where different data sources typically share the same \"vocabulary\" (e.g., pixels, patches, or sub-words) and similar relationships between vocabulary \"elements\" (e.g., neighboring pixels", + "bbox": [ + 501, + 737, + 926, + 944 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg", + "image_caption": [ + "Figure 9: Illustration of general methods. These methods handle inherent heterogeneity by improving the model's adaptability or homogenizing the diverse tabular formats. Once pre-trained, they can be directly applied to downstream tasks without fine-tuning." + ], + "image_footnote": [], + "bbox": [ + 76, + 54, + 279, + 161 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 55, + 491, + 161 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "often share colors). The lack of shared vocabulary and relationships in tabular data makes it challenging to jointly train a model across multiple datasets, let alone apply a pre-trained model directly to new downstream tasks.", + "bbox": [ + 71, + 252, + 491, + 311 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "There are two main strategies to address the inherent heterogeneity in tabular datasets: improving the model's adaptability or homogenizing the diverse tabular formats. We categorize general tabular models into three parts based on their strategies for achieving generalizability. 
The first focuses on raw-feature-based approaches, among which TabPFN variants represent a rapidly evolving branch and are thus discussed separately. The third category encompasses semantic-based methods that leverage attribute and task semantics to unify heterogeneous tasks.", + "bbox": [ + 71, + 311, + 491, + 458 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "7.1 Raw-Feature-based General Models", + "text_level": 1, + "bbox": [ + 73, + 477, + 372, + 491 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To adapt a general tabular model to heterogeneous tabular datasets during the pre-training and fine-tuning stages, two main strategies can be used from the data-centric and model-centric perspectives. From the data-centric perspective, the general model may standardize tabular datasets into a homogeneous form. For instance, TabPTM [86] transforms all datasets into a uniform format using meta-representation to enable pre-training. The pre-trained model can then be applied directly to a downstream dataset or fine-tuned without introducing additional parameters.", + "bbox": [ + 71, + 494, + 490, + 641 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Alternatively, from the model-centric perspective, the general model may improve adaptability by tailoring it to specific tabular tasks. HyperFast [87] adopts the concept of a Hyper Network [272] in meta-learning [273], where a mapping from the tabular dataset to the weights of a classifier is learned. This mapping can then be used to predict labels for unseen instances from the task. To address datasets with varying dimensions, HyperFast projects datasets into a fixed size using random projections. 
To overcome the slow weight generation speed, MotherNet accelerates HyperFast by modifying its architecture with Transformer-like modules [88].", + "bbox": [ + 71, + 642, + 491, + 803 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "7.2 TabPFN Variants", + "text_level": 1, + "bbox": [ + 73, + 821, + 236, + 834 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The TabPFN family of models [89], [91] leverages the incontext learning capabilities of transformers, directly predicting labels by adapting test instances according to the context of training examples. In the first version of TabPFN, an instance $\\boldsymbol{x}_i$ is padded to a fixed dimension (e.g., 100), and the features are projected to a higher dimension (e.g., $d'$ ) for further processing. The label $y_i$ is processed similarly and", + "bbox": [ + 71, + 839, + 491, + 944 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "added to the instance embeddings. The embeddings of all $N + 1$ instances, including training and test instances, are formulated into a set of $N + 1$ tokens with $d'$ dimensions. These tokens are processed through several layers of a Transformer, and the output token corresponding to the test instance is further predicted using a 10-way classifier. TabPFN is pretrained over synthetically generated datasets with structured causal models (SCM) [274] and Bayesian Neural Networks (BNNs) [275], [276], enabling the strong in-context learning ability, with the best checkpoint selected based on some real-world datasets. Due to the high complexity of transformers, TabPFN is limited to small-scale tasks, with suggested sizes of $N < 1000$ , $d < 100$ , and $C < 10$ .", + "bbox": [ + 501, + 53, + 924, + 241 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "TabPFN v2 introduces a specialized feature tokenizer to better handle heterogeneity. 
Specifically, each cell in the table is projected to a $k$ -dimensional vector using a shared mapping, and random position encoding vectors are added to differentiate features [277]. This results in a tensor of size $(N + 1) \\times (d + 1) \\times k$ when there is a single test instance. The label of each instance is processed similarly, and the mapped $k$ -dimensional token is concatenated with the instance tokens. A dummy label (e.g., the average of all labels) is used for the test instance since its label is unknown. A two-way attention mechanism is used, with each feature attending to the other features in its row and then attending to the same feature across its column [278]. The output token corresponding to the label of the test instance is further mapped to a 10-class classifier or regressor. Several improvements have been made in TabPFN v2, including increased context size ( $N < 10000$ , $d < 500$ ), automatic feature engineering, and post-hoc ensemble methods. [279] analyzes TabPFN from a bias-variance perspective, shedding light on its generalization capabilities. Various applications have also been explored, including tabular data generation [280], anomaly detection [281], data augmentation [282], and time series forecasting [283].", + "bbox": [ + 501, + 242, + 926, + 561 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The improvements of TabPFN (especially TabPFN v1) stem from several aspects.", + "bbox": [ + 503, + 563, + 924, + 592 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Pre-training Improvements. TabForestPFN [284] extends TabPFN by pre-training In-Context Learning (ICL)-transformers on a new forest dataset generator that creates unrealistic datasets with complex decision boundaries. TabDPT [179] pre-trains the architecture on real-world datasets using self-supervised learning and retrieval objectives, making it suitable for both classification and regression tasks. 
APT [285] is pre-trained utilizing adversarial synthetic data generated by adaptive agents, which systematically modify the underlying data-generating distribution and deliberately challenge the model with diverse synthetic datasets to enhance its robustness and generalization capabilities. TabICL [286] integrates tree-based SCMs using XGBoost [130] to model complex interactions and employs curriculum learning by progressively increasing synthetic dataset sizes. Scalable Improvements. The efficiency of TabPFN is highly sensitive to context size, prompting strategies to enhance scalability and performance [39]. These include compressing training data into a compact learned representation using sketching [287] or prompt tuning techniques [288], [289],", + "bbox": [ + 501, + 592, + 924, + 885 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. Some variants of TabPFN are not considered general tabular models, especially the latter parts, as they require additional fine-tuning steps. We place them in this subsection due to their strong relationship with TabPFN.", + "bbox": [ + 503, + 893, + 924, + 941 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 32, + 413, + 44 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "employing adaptive data selection methods to identify the most pertinent training examples for each test instance [290], [90], [179], [291], and replacing traditional quadratic attention with computationally efficient linear attention mechanisms [292] and state-space models (SSMs) [293].", + "bbox": [ + 71, + 53, + 491, + 126 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Adaptation Improvements. 
Some approaches improve TabPFN's performance on downstream tasks by adapting the context [90] or fine-tuning specific parts of the model [96], [284], [290], [289]. TabICL [286] employs a column-then-row attention mechanism to construct fixed-dimensional embeddings of rows, which are subsequently processed by a transformer like TabPFN v1 to facilitate efficient in-context learning. EquiTabPFN [294] introduces self-attention across target components, ensuring that the arbitrary ordering of target dimensions does not influence model predictions, enhancing the performance of TabPFN v1 to some extent.", + "bbox": [ + 71, + 127, + 491, + 287 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "7.3 Semantics-based General Models", + "text_level": 1, + "bbox": [ + 73, + 308, + 356, + 321 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "By leveraging the semantic structure of tabular data, such as column names, heterogeneous tasks can be projected into a shared language space. This allows a single language model, pre-trained on diverse tabular datasets, to handle unseen tasks in a unified manner. TabuLa-8B [92] fine-tunes a Llama 3-8B LLM for tabular data prediction (classification and binned regression) using a novel packing and attention scheme for tabular prediction. GTL [93] transforms tabular datasets into an instruction-oriented language format, facilitating the continued pre-training of LLMs on instruction-oriented tabular data, which demonstrates strong performance in few-shot scenarios. GTL-S [295] unlocks the potential of GTL from a scaling perspective, revealing that scaling datasets and prediction tasks enhance generalization. [94] extends GTL by incorporating retrieval-augmented LLMs for tabular data, combined with retrieval-guided instruction-tuning for LLMs. MediTab [243] uses a data engine that leverages LLMs to consolidate tabular samples to overcome the barrier across tables with distinct schema. 
MediTab aligns out-domain data with the target task using a \"learn, annotate, and refinement\" pipeline, enabling the pre-trained model to infer for arbitrary tabular input in the domain without fine-tuning.", + "bbox": [ + 71, + 328, + 493, + 664 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "8 TABULAR ENSEMBLE METHODS", + "text_level": 1, + "bbox": [ + 73, + 686, + 366, + 702 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Ensemble learning is a natural way to improve the generalization ability of multiple base learners by leveraging their diversity. Classical methods such as Random Forest [127] and AdaBoost [126], [296] employ bagging and boosting, respectively, by ensembling multiple decision trees. These methods have proven effective for tabular data, as they reduce bias/variance and improve robustness [297].", + "bbox": [ + 71, + 708, + 491, + 811 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In deep tabular learning, ensemble methods can be categorized into two primary approaches: joint-training ensembles, where multiple sub-networks are aggregated within a single training pipeline, and post-hoc ensembles, where the predictions from multiple pre-trained deep tabular models are fused. One major challenge in ensembling deep tabular methods is computational efficiency, as training multiple deep models or sub-models can be computationally expensive and time-consuming.", + "bbox": [ + 71, + 811, + 491, + 944 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "8.1 Joint-Training Ensembles", + "text_level": 1, + "bbox": [ + 504, + 53, + 736, + 68 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Joint-training ensemble methods integrate diverse model architectures within a single training process to improve predictive performance while maintaining efficiency. 
These architectures often combine different types of models, such as linear and non-linear models [28] or tree-based and deep neural network-based approaches [63]. Tree-mimic methods leverage this concept by mixing predictions from multiple tree nodes to enhance robustness [60], [59], [193].", + "bbox": [ + 501, + 74, + 924, + 191 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "To improve efficiency while maintaining predictive power, various techniques have been explored. Some approaches employ parameter-efficient ensembles, such as TabM [176], which uses MLPs as base learners and incorporates BatchEnsemble [298] to generate multiple diverse base learners efficiently. This prevents a large increase in the number of learnable parameters while maintaining model diversity. Similarly, BETA leverages pre-trained TabPFN by generating multiple base learners through additional parameter tuning [96]. Specifically, BETA learns multiple feature projections, feeding the projected training sets into TabPFN and aggregating the results while applying BatchEnsemble to reduce the number of additional learnable parameters.", + "bbox": [ + 503, + 191, + 926, + 381 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Some hybrid approaches, such as LLM-Boost and PFN-Boost, have been developed to integrate large language models and TabPFN with gradient-boosted decision trees [299]. In these approaches, LLMs and PFN serve as the initial base learners, and additional base learners are sequentially trained in a boosting manner. 
This approach leverages the strong prior knowledge from LLMs and TabPFN while maintaining the scalability of gradient-boosted decision trees.", + "bbox": [ + 503, + 382, + 926, + 501 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "8.2 Post-Hoc Ensembles", + "text_level": 1, + "bbox": [ + 504, + 525, + 702, + 539 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Post-hoc ensemble (PHE) methods involve combining multiple trained models to improve robustness and accuracy. Bagging-based ensembles are one of the most direct post-hoc strategies, where usually multiple models trained with different random seeds are aggregated [33], [69]. Although this approach improves model robustness, it incurs high computational overhead. Some recent studies have demonstrated that LLM-based methods exhibit diverse prediction behaviors compared to deep tabular models that do not utilize attribute names [94]. This difference in prediction styles enhances their complementarity, making them ideal candidates for ensemble methods.", + "bbox": [ + 501, + 547, + 926, + 720 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Instead of explicitly training multiple models, perturbation-based approaches create diverse predictions from the same pre-trained model. One such method applies feature permutation with TabPFN, leveraging the fact that TabPFN is not fully feature permutation-invariant [89]. A perturbation-based ensemble can be formed by randomly permuting the feature order in both the training and test sets and making predictions multiple times, generating multiple diverse predictors without additional training costs. TabPFN v2 introduces additional perturbations to enhance diversity among several key factors, including variations in feature encoding, feature quantization, categorical feature shuffling, SVD-based feature compression, outlier removal, and power transformations such as the Yeo-Johnson transformation [91]. 
These randomly selected transformations create diverse", + "bbox": [ + 503, + 723, + 926, + 941 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "prediction patterns, enabling effective ensemble learning without requiring multiple separately trained models.", + "bbox": [ + 71, + 53, + 491, + 82 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Another post-hoc ensemble strategy employed in TabPFN v2 is the use of Portfolio-Based Ensemble, where a fixed set of TabPFN configurations is used [91]. A greedy ensemble selection technique is then applied to learn optimal weights for aggregating the predictions of different configurations [300]. By combining multiple perturbed models, this method improves generalization without excessive training costs. Some methods apply ensemble techniques to TabPFN v1 to handle large datasets. For instance, TabPFN-Bagging [96], [301] divides large datasets into multiple context groups, with the final results averaged to mitigate variance. BoostPFN [301] treats TabPFN v1 as weak learners, where each weak learner uses a subset of the training data as context. This approach allows BoostPFN to outperform standard Prior Fitted Networks (PFNs) on large datasets.", + "bbox": [ + 71, + 83, + 491, + 303 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "9 EXTENSIONS", + "text_level": 1, + "bbox": [ + 73, + 330, + 210, + 347 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this section, we briefly introduce some extensions on deep tabular methods across different complex tasks.", + "bbox": [ + 71, + 356, + 488, + 385 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Clustering. 
Traditional clustering approaches often leverage enhanced distance metrics, such as the Gower distance [302], which is specifically designed for mixed data types, and interpretable prototypes, such as K-medoids. Recent advances in tabular data clustering have sought to integrate interpretability constraints with deep representation learning. For example, IDC [97] introduces a deep learning framework for general tabular data that predicts interpretable cluster assignments at both the instance and cluster levels. To address overlapping clusters, TableDC [98] integrates the Mahalanobis distance, which accounts for variance and correlation within the data. This method provides a similarity measure suitable for tables, rows, or columns in high-dimensional latent spaces.", + "bbox": [ + 71, + 386, + 491, + 590 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Anomaly Detection. Anomaly detection in tabular data is crucial for identifying subtle irregularities in structured datasets, such as fraudulent transactions or equipment failures. While classical techniques like Isolation Forest [303] and Local Outlier Factor [304] remain foundational, recent developments have incorporated various methods to capture contextual relationships in high-dimensional data. For instance, [305] introduces a method that learns mappings that maximize mutual information between each sample and the part that is masked out, capturing the structural nuances of samples from a single training class. ADBench [99] provides a comprehensive tabular anomaly detection benchmark with 30 algorithms and 57 benchmark datasets. Additionally, large language models (LLMs) have also been employed for anomaly detection in tabular data [306].", + "bbox": [ + 71, + 590, + 491, + 810 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Tabular Generation. Tabular data generation has become an essential tool for synthetic data creation, privacy preservation, and addressing data scarcity. 
Traditional methods, such as Bayesian networks or GANs, focus on mimicking marginal distributions, while recent advancements emphasize preserving complex feature dependencies and semantic consistency. For instance, tabular diffusion models [307] iteratively refine synthetic data to capture subtle correlations in high-dimensional datasets, outperforming GANs in terms of data", + "bbox": [ + 71, + 811, + 491, + 941 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "fidelity. [308] introduces high-order structural causal information as a natural prior knowledge and offers a benchmark framework for evaluating tabular synthesis models. Despite these advances, challenges remain in balancing realism with privacy, such as avoiding identity leakage in sensitive datasets, and scaling to heterogeneous data types. Hybrid neuro-symbolic models [309] bridge this gap to provide trustworthy synthetic data for downstream tasks.", + "bbox": [ + 501, + 53, + 921, + 169 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Interpretability. Traditional gradient-boosted decision trees (GBDTs) inherently provide interpretability through feature importance scores and decision path visualization. Frameworks such as XGBoost [130] and LightGBM [131] quantify feature importance using metrics like split frequency and information gain. SHAP values [310] enable instance-level explanations by decomposing model predictions into feature contributions. The additive nature of GBDTs allows for partial dependence plots [311] to visualize feature effects while controlling for interactions. NeC4.5 [253], a novel decision tree algorithm that integrates the comprehensibility of decision trees with the generalization ability of neural network ensembles. 
By training a neural network ensemble to generate a new training set, NeC4.5 enhances decision tree performance while maintaining interpretability.", + "bbox": [ + 503, + 170, + 921, + 388 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Recent deep models specifically designed for tabular data have introduced novel interpretability mechanisms. For example, NAMs [255] combine some of the expressivity of DNNs with the inherent intelligibility of generalized additive models. They learn a linear combination of neural networks that each attend to a single input feature, which are trained jointly and can learn arbitrarily complex relationships between their input feature and the output. TabNet [105] uses sequential attention with learnable feature masks, where each decision step explicitly selects a subset of features via sparse masking. The aggregated feature usage across steps provides global interpretability comparable to GBDT's feature importance. Subsequent variants, such as TabTransformer [63], enhance interpretability by visualizing cross-feature attention patterns. FT-Transformer [33] combines feature tokenization with explainable attention, while NODE [60], NODE-GAM [61] and DOFEN [312] generalize ensembles of oblivious decision trees, benefiting from both end-to-end gradient-based optimization and multi-layer hierarchical representation learning.", + "bbox": [ + 503, + 388, + 921, + 679 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Open-Environment Tabular Machine Learning. Research on distribution shifts in tabular data starts with domain-to-domain shifts [110], which are commonly categorized based on the availability of target domain data. When target data is available, transfer learning techniques such as unsupervised domain adaptation [313] and test-time adaptation [314] are widely used. These methods adapt model parameters using test-time inputs but rely on access to target distributions, which may not always be feasible. 
In contrast, when target data is unavailable, a more practical but challenging scenario, methods aiming to enhance robustness and generalization, using approaches such as domain generalization [315], domain robustness [316], [317], label robustness [318] or ensemble strategies [95]. TableShift [110] provides a detailed analysis of this scenario.", + "bbox": [ + 503, + 680, + 921, + 897 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Beyond domain-to-domain shifts, temporal shifts are more general and complex. TabReD [109] emphasizes the inherent temporality of real-world tabular data, advocating", + "bbox": [ + 503, + 898, + 923, + 941 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "for temporal splits for training and testing. [319] further propose a refined training protocol focusing on temporal evaluation, significantly improving generalization across models. To address temporal shifts, it's critical to incorporate temporal information [319]. Drift-Resilient TabPFN [174] models temporal shifts with a secondary SCM, which specifies changes in the primary model parameters. [319] introduce a plug-and-play temporal embedding that effectively captures trend and periodicity patterns, providing an adaptive mechanism to mitigate the impact of temporal shifts. Under temporal shift conditions, most methods experience performance degradation, while TabM [95] exhibits relative robustness [109]. However, [319] demonstrate that with the refined training protocol and temporal embedding, methods such as ModernNCA [35] can regain competitiveness.", + "bbox": [ + 71, + 53, + 491, + 272 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Multi-modal Learning with Tabular Data. 
Text, such as feature names, can be effectively utilized to enhance tabular data learning, as discussed in Section 6. Here, we focus on interactions with the image modality, e.g., in healthcare, where medical images require specialized equipment and expert knowledge, often in tabular form, for accurate diagnosis [320]. To tackle challenges like large medical datasets and high annotation costs, MMCL [106] uses a contrastive self-supervised learning framework that integrates images and tabular data. CHARMS [107] transfers expert knowledge from tabular data to images, improving image predictions even without tabular data during inference, thus reducing reliance on costly expert annotations. TIP [321] proposes a self-supervised learning strategy with a tabular encoder for incomplete, heterogeneous data and a multimodal interaction module for inter-modality representation learning.", + "bbox": [ + 71, + 281, + 491, + 515 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Tabular Understanding. Tabular understanding involves comprehending the information contained within a table and can be broken down into several tasks. For example, Table Detection (TD) [322], [323] refers to identifying the region of the image that contains the table while Table Structure Recognition (TSR) [324], [325] involves the identification of the rows and columns to identify individual table cells, which aims to recognize the cellular structures of tables from table images by extracting the coordinates of cell boxes and row/column spanning information. Table Question Answering (TQA) [326], [327], [112] refers to providing precise answers from tables to answer a user's question. 
Traditional methods, whether OCR-based [328], [329], [330] or OCR-free [331], [332], [333], [334], [335], have made significant strides in TSR and TD, which are relatively simpler tasks.", + "bbox": [ + 71, + 523, + 491, + 743 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "More complex tasks, such as TQA, have also been the focus of considerable effort. For example, Donut [332] proposes a novel task and a synthetic document image generator to pre-train the model, reducing reliance on large-scale real document images. Monkey and TextMonkey [336], [337] utilize shifted window attention and use similarity measures to filter out redundant tokens. mPLUG-DocOwl [338] adapts mPLUG-Owl for OCR-free document understanding, while TabPedia [335] constructs low- and high-resolution vision encoders with a concept synergy mechanism for visual table understanding. [339] focuses on exploring various table representations and directly prompting LLMs to improve performance. Please refer to [112], [113] for more details.", + "bbox": [ + 71, + 752, + 491, + 941 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "10 DISCUSSIONS", + "text_level": 1, + "bbox": [ + 506, + 51, + 663, + 66 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we discuss several possible future directions for tabular machine learning, particularly in light of the significant potential demonstrated by tabular general/foundation models.", + "bbox": [ + 503, + 78, + 924, + 136 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The Ability to Handle Dynamic and Open Environments. Tabular models, particularly foundation models, will increasingly need to operate in dynamic, real-world environments where data evolves over time [340]. One of the key challenges is dealing with imbalanced datasets [155], where certain classes may be underrepresented, and the distribution of data may shift over time [110]. 
As a result, models need to adapt to these changes and continue providing accurate predictions. Additionally, the emergence of new classes in the data may require the model to evolve and update its predictions in real-time [341]. This calls for methods that ensure tabular foundation models can accommodate evolving data, handling both new classes and changing distributions effectively.", + "bbox": [ + 503, + 137, + 924, + 340 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The Coverage and Scope of Tabular Foundation Models. Current tabular foundation models have demonstrated strong performance on various unseen classification and regression tasks. However, several important questions remain about their capabilities. For instance, in addition to in-context learning [246], are there other prediction strategies that could be employed to further enhance the versatility and performance of tabular foundation models? Beyond classification and regression, can these models be extended to handle related tasks such as clustering, imputation, outlier detection, or even table-based question answering (QA)? Expanding the task scope could increase the model's utility in a wide range of applications. Furthermore, it is worth investigating whether there is a scaling law [342] for tabular foundation models. Currently, tabular checkpoints are relatively small compared to foundation models in other modalities, such as language or vision. Understanding the implications of scaling these models—particularly the trade-offs between model size and performance—will be crucial for their future development.", + "bbox": [ + 503, + 342, + 924, + 633 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Will Foundation Models Always Help? While foundation models have demonstrated impressive generalization abilities, there are inherent trade-offs. 
Similar to ensemble learning, a single foundation model may provide an \"average\" predictive ability across tasks, potentially losing specialized expertise for specific tasks. To address this, a promising approach could be the development of a \"tabular model zoo\" [343], [344]. In this paradigm, different pre-trained models, potentially including models from other domains, could be combined for a specific tabular task. Given a new task, suitable pre-trained models could be selected, adapted if necessary, and integrated for improved performance.", + "bbox": [ + 503, + 635, + 924, + 810 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Model Efficiency. In many real-world applications, tabular datasets are large and high-dimensional, posing significant challenges for both training and inference [345], [44]. One area of concern is how to handle extreme cases, such as when the data is exceptionally large or sparse. Foundation models should be able to scale effectively in these scenarios without sacrificing performance. Another issue is inference speed. In large-scale problems, timely predictions are essential, especially when deployed in real-time environments [292]. Opti-", + "bbox": [ + 503, + 811, + 926, + 941 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 31, + 413, + 44 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "mizing the inference process is therefore critical to ensure that predictions can be made quickly on large, complex datasets. Lastly, the computational resources required for training and deploying foundation models can be substantial [346]. 
Optimizing resource usage through methods such as model pruning, quantization, and efficient training algorithms will be important to ensure that these models remain practical and accessible for a wide range of applications.", + "bbox": [ + 71, + 53, + 491, + 169 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Bridging the Gap Between Tabular Data and Other Modalities. Tabular data often coexists with other data modalities, such as images and text. One of the exciting challenges in the field is how to effectively integrate tabular data with foundation models from other domains [347]. Combining the strengths of tabular models with those of vision or language models could result in more powerful and versatile models capable of handling multimodal data. Exploring how to seamlessly integrate these modalities—whether through joint embeddings, cross-modal attention mechanisms, or other techniques—could unlock significant advances in tasks that require both structured tabular data and unstructured data sources like images or text.", + "bbox": [ + 71, + 170, + 491, + 359 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "11 CONCLUSION", + "text_level": 1, + "bbox": [ + 73, + 376, + 225, + 390 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Tabular data remains a cornerstone of real-world machine learning applications, and the advancement of deep learning has opened new possibilities for effective representation learning in this domain. In this survey, we present a comprehensive overview of deep tabular representation learning, covering its background, challenges, evaluation benchmarks, and the discussion between tree-based models and DNNs. We systematically categorize existing methods into three categories—specialized, transferable, and general models—based on their generalization capabilities. 
In addition, we discuss ensemble techniques, extensions, and some promising future directions, such as open-environment and multimodal tabular learning. We hope this survey serves as a valuable reference for understanding the current state of the field and inspires further progress in developing more robust and generalizable tabular learning methods.", + "bbox": [ + 71, + 395, + 493, + 630 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 73, + 646, + 187, + 660 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] B. Kovalerchuk and E. Vityaev, Data mining in finance: advances in relational and hybrid methods. Springer Science & Business Media, 2005. 1", + "[2] S. L. Hyland, M. Faltys, M. Hüser, X. Lyu, T. Gumbsch, C. Esteban, C. Bock, M. Horn, M. Moor, B. Rieck et al., \"Early prediction of circulatory failure in the intensive care unit using machine learning,\" Nature medicine, vol. 26, no. 3, pp. 364-373, 2020. 1", + "[3] C. Romero and S. Ventura, \"Educational data mining: a review of the state of the art,\" IEEE Transactions on Systems, Man, and Cybernetics, vol. 40, no. 6, pp. 601-618, 2010. 1", + "[4] X. Amatriain, A. Jaimes, N. Oliver, and J. M. Pujol, \"Data mining methods for recommender systems,\" in Recommender systems handbook. Springer, 2010, pp. 39-71. 1", + "[5] R. Tibshirani, T. Hastie, B. Narasimhan, and G. Chu, \"Diagnosis of multiple cancer types by shrunken centroids of gene expression,\" Proceedings of the National Academy of Sciences, vol. 99, no. 10, pp. 6567-6572, 2002. 1, 4", + "[6] O. Ivanciuc et al., \"Applications of support vector machines in chemistry,\" Reviews in computational chemistry, vol. 23, p. 291, 2007. 1", + "[7] N. K. Ahmed, A. F. Atiya, N. E. Gayar, and H. El-Shishiny, \"An empirical comparison of machine learning models for time series forecasting,\" Econometric reviews, vol. 29, no. 5-6, pp. 594-621, 2010." 
+ ], + "bbox": [ + 73, + 666, + 491, + 939 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] M. R. Allen and D. A. Stainforth, \"Towards objective probabilistic climate forecasting,\" Nature, vol. 419, no. 6903, pp. 228-228, 2002. 1", + "[9] V. Borisov, T. Leemann, K. Seßler, J. Haug, M. Pawelczyk, and G. Kasneci, \"Deep neural networks and tabular data: A survey,\" IEEE Transactions Neural Networks and Learning Systems, vol. 35, no. 6, pp. 7499-7519, 2024. 1, 4, 7, 8", + "[10] C. C. Aggarwal, Data Mining - The Textbook. Springer, 2015. 1", + "[11] Z. Ji, Z. C. Lipton, and C. Elkan, \"Differential privacy and machine learning: a survey and review,\" CoRR, vol. abs/1412.7584, 2014. 1", + "[12] M. F. Delgado, E. Cernadas, S. Barro, and D. G. Amorim, \"Do we need hundreds of classifiers to solve real world classification problems?\" Journal of Machine Learning Research, vol. 15, no. 1, pp. 3133-3181, 2014. 1, 5, 6", + "[13] C. Bishop, Pattern recognition and machine learning. Springer, 2006. 1", + "[14] T. Hastie, R. Tibshirani, and J. H. Friedman, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, 2nd Edition. Springer, 2009. 1, 4", + "[15] M. Mohri, A. Rostamizadeh, and A. Talwalkar, Foundations of Machine Learning. MIT Press, 2012. 1", + "[16] K. P. Murphy, Probabilistic Machine Learning: An introduction, ser. Adaptive computation and machine learning series. MIT Press, 2022. 1", + "[17] A. Voulodimos, N. Doulamis, A. Doulamis, E. Protopapadakis et al., \"Deep learning for computer vision: A brief review,\" Computational intelligence and neuroscience, vol. 2018, 2018. 1", + "[18] D. W. Otter, J. R. Medina, and J. K. Kalita, \"A survey of the usages of deep learning for natural language processing,\" IEEE transactions on neural networks and learning systems, vol. 32, no. 2, pp. 604-624, 2020. 1", + "[19] Y. Bengio, A. Courville, and P. 
Vincent, \"Representation learning: A review and new perspectives,\" IEEE transactions on pattern analysis and machine intelligence, vol. 35, no. 8, pp. 1798-1828, 2013. 1", + "[20] Y. LeCun, Y. Bengio, and G. Hinton, \"Deep learning,\" nature, vol. 521, no. 7553, pp. 436-444, 2015. 1", + "[21] I. Goodfellow, Y. Bengio, and A. Courville, Deep learning. MIT press, 2016. 1", + "[22] J. Donahue, Y. Jia, O. Vinyals, J. Hoffman, N. Zhang, E. Tzeng, and T. Darrell, \"Decaf: A deep convolutional activation feature for generic visual recognition,\" in ICML, 2014, pp. 647-655. 1", + "[23] G. E. Hinton and R. R. Salakhutdinov, \"Reducing the dimensionality of data with neural networks,\" science, vol. 313, no. 5786, pp. 504-507, 2006. 2, 4", + "[24] J. Weston, F. Ratle, and R. Collobert, \"Deep learning via semi-supervised embedding,\" in ICML, 2008, pp. 1168-1175. 2, 4", + "[25] L. Van Der Maaten, \"Learning a parametric embedding by preserving local structure,\" in AISTATS, 2009, pp. 384-391. 2, 4", + "[26] M. R. Min, L. Maaten, Z. Yuan, A. J. Bonner, and Z. Zhang, \"Deep supervised t-distributed embedding,\" in ICML, 2010, pp. 791-798. 2, 4", + "[27] W. Zhang, T. Du, and J. Wang, \"Deep learning over multi-field categorical data -- A case study on user response prediction,\" in ECIR, 2016, pp. 45-57. 2, 4", + "[28] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah, \"Wide & deep learning for recommender systems,\" in DLRS, 2016, pp. 7-10. 2, 4, 19", + "[29] K. G. Mehrotra, C. K. Mohan, H. Huang, K. G. Mehrotra, C. K. Mohan, and H. Huang, Anomaly detection. Springer, 2017. 2, 4", + "[30] F. O. Isinkaye, Y. O. Folajimi, and B. A. Ojokoh, \"Recommendation systems: Principles, methods and evaluation,\" Egyptian informatics journal, vol. 16, no. 3, pp. 261-273, 2015. 2, 4", + "[31] S. S. Rangapuram, M. W. Seeger, J. Gasthaus, L. Stella, Y. Wang, and T. 
Januschowski, \"Deep state space models for time series forecasting,\" in NeurIPS, 2018, pp. 7796-7805. 2, 4", + "[32] B. Lim and S. Zohren, \"Time-series forecasting with deep learning: a survey,\" Philosophical Transactions of the Royal Society A, vol. 379, no. 2194, p. 20200209, 2021. 2, 4", + "[33] Y. Gorishniy, I. Rubachev, V. Khrulkov, and A. Babenko, \"Revisiting deep learning models for tabular data,\" in NeurIPS, 2021, pp. 18932-18943. 2, 3, 4, 6, 7, 8, 9, 11, 12, 19, 20", + "[34] D. Holzmüller, L. Grinsztajn, and I. Steinwart, \"Better by default: Strong pre-tuned mlp's and boosted trees on tabular data,\" in NeurIPS, 2024, pp. 26577-26658. 2, 4, 5, 7, 9, 12" + ], + "bbox": [ + 506, + 54, + 924, + 941 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 32, + 410, + 44 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[35] H.-J. Ye, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Revisiting nearest neighbor for tabular data: A deep tabular baseline two decades later,\" in ICLR, 2025. 2, 3, 4, 9, 10, 21", + "[36] L. Grinsztajn, E. Oyallon, and G. Varoquaux, \"Why do tree-based models still outperform deep learning on typical tabular data?\" in NeurIPS, 2022, pp. 507-520. 2, 5, 6, 7, 8", + "[37] R. Shwartz-Ziv and A. Armon, \"Tabular data: Deep learning is not all you need,\" Information Fusion, vol. 81, pp. 84-90, 2022. 2", + "[38] E. Beyazit, J. Kozaczuk, B. Li, V. Wallace, and B. Fadlallah, \"An inductive bias for tabular deep learning,\" in NeurIPS, 2023, pp. 43108-43135. 2, 7, 11", + "[39] D. C. McElfresh, S. Khandagale, J. Valverde, V. P. C., G. Ramakrishnan, M. Goldblum, and C. White, \"When do neural nets outperform boosted trees on tabular data?\" in NeurIPS, 2023, pp. 76336-76369. 2, 5, 6, 7, 8, 18", + "[40] H.-J. 
Ye, D.-C. Zhan, N. Li, and Y. Jiang, \"Learning multiple local metrics: Global consideration helps,\" IEEE transactions on pattern analysis and machine intelligence, vol. 42, no. 7, pp. 1698-1712, 2019. 2", + "[41] S. M. Jesus, J. Pombal, D. Alves, A. F. Cruz, P. Saleiro, R. P. Ribeiro, J. Gama, and P. Bizarro, \"Turning the tables: Biased, imbalanced, dynamic tabular datasets for ML evaluation,\" in NeurIPS, 2022, pp. 33563-33575. 2, 5", + "[42] R. Kohli, M. Feurer, K. Eggensperger, B. Bischl, and F. Hutter, \"Towards quantifying the effect of datasets for benchmarking: A look at tabular machine learning,\" in ICLR Workshop, 2024. 2, 6", + "[43] A. Tschalzev, S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"A data-centric perspective on evaluating machine learning models for tabular data,\" in NeurIPS Datasets and Benchmarks Track, 2024. 2, 6, 8", + "[44] H.-J. Ye, S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and D.-C. Zhan, \"A closer look at deep learning on tabular data,\" CoRR, vol. abs/2407.00956, 2024. 2, 6, 7, 8, 21", + "[45] Y. Gorishniy, I. Rubachev, and A. Babenko, \"On embeddings for numerical features in tabular deep learning,\" in NeurIPS, 2022, pp. 24991-25004. 2, 4, 8, 9, 11", + "[46] T. Ucar, E. Hajiramezanali, and L. Edwards, \"Subtab: Subsetting features of tabular data for self-supervised representation learning,\" in NeurIPS, 2021, pp. 18853-18865. 2, 9, 14", + "[47] D. Bahri, H. Jiang, Y. Tay, and D. Metzler, \"Scarf: Self-supervised contrastive learning using random feature corruption,\" in ICLR, 2022. 2, 9, 14", + "[48] J. Yoon, Y. Zhang, J. Jordon, and M. van der Schaar, \"VIME: extending the success of self- and semi-supervised learning to tabular domain,\" in NeurIPS, 2020, pp. 11.033-11.043. 2, 9, 13, 14", + "[49] J. Wu, S. Chen, Q. Zhao, R. Sergazinov, C. Li, S. Liu, C. Zhao, T. Xie, H. Guo, C. Ji, D. Cociorva, and H. Brunzell, \"Switchtab: Switched autoencoders are effective tabular learners,\" in AAAI, 2024, pp. 15924-15933. 
2, 7, 9, 13", + "[50] A. Kadra, M. Lindauer, F. Hutter, and J. Grabocka, \"Well-tuned simple nets excel on tabular datasets,\" in NeurIPS, 2021, pp. 23928-23941. 2, 4, 6, 9, 10, 12", + "[51] R. Wang, B. Fu, G. Fu, and M. Wang, \"Deep & cross network for ad click predictions,\" in ADKDD, 2017, pp. 1-7. 2, 7", + "[52] G. Klambauer, T. Unterthiner, A. Mayr, and S. Hochreiter, \"Self-normalizing neural networks,\" in NIPS, 2017, pp. 971-980. 2, 9, 12", + "[53] G. Ke, J. Zhang, Z. Xu, J. Bian, and T.-Y. Liu, \"Tabnn: A universal neural network solution for tabular data,\" 2018. 2", + "[54] R. Wang, R. Shivanna, D. Z. Cheng, S. Jain, D. Lin, L. Hong, and E. H. Chi, \"DCN V2: improved deep & cross network and practical lessons for web-scale learning to rank systems,\" in WWW, 2021, pp. 1785-1797. 2, 7, 9, 12", + "[55] J. Chen, K. Liao, Y. Wan, D. Z. Chen, and J. Wu, \"Danets: Deep abstract networks for tabular data classification and regression,\" in AAAI, 2022, pp. 3930-3938. 2, 9, 13", + "[56] J. Chen, K. Liao, Y. Fang, D. Chen, and J. Wu, \"Tabcaps: A capsule neural network for tabular data classification with bow routing,\" in ICLR, 2023. 2", + "[57] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in KDD, 2024, pp. 3679-3689. 2", + "[58] C. Xu, Y.-C. Huang, J. Y.-C. Hu, W. Li, A. Gilani, H.-S. Goan, and H. Liu, \"Bishop: Bi-directional cellular learning for tabular data with generalized sparse modern hopfield model,\" in ICML, 2024, pp. 55048-55075. 2, 7, 9, 12" + ], + "bbox": [ + 76, + 54, + 491, + 941 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[59] S. Badirli, X. Liu, Z. Xing, A. Bhowmik, and S. S. Keerthi, \"Gradient boosting neural networks: Grownet,\" CoRR, vol. abs/2002.07971, 2020. 2, 7, 8, 9, 12, 19", + "[60] S. Popov, S. Morozov, and A. 
Babenko, “Neural oblivious decision ensembles for deep learning on tabular data,” in ICLR, 2020. 2, 8, 9, 12, 19, 20", + "[61] C.-H. Chang, R. Caruana, and A. Goldenberg, \"NODE-GAM: neural generalized additive model for interpretable deep learning,\" in ICLR, 2022. 2, 3, 8, 9, 12, 20", + "[62] W. Song, C. Shi, Z. Xiao, Z. Duan, Y. Xu, M. Zhang, and J. Tang, \"Autoint: Automatic feature interaction learning via self-attentive neural networks,\" in CIKM, 2019, pp. 1161-1170. 3, 7, 9, 11, 13", + "[63] X. Huang, A. Khetan, M. Cvitkovic, and Z. S. Karnin, \"Tabransformer: Tabular data modeling using contextual embeddings,\" CoRR, vol. abs/2012.06678, 2020. 3, 7, 8, 9, 11, 13, 14, 19, 20", + "[64] Q.-L. Zhou, H.-J. Ye, L. Wang, and D.-C. Zhan, \"Unlocking the transferability of tokens in deep models for tabular data,\" CoRR, vol. abs/2310.15149, 2023. 3, 9, 13, 15", + "[65] J. Chen, J. Yan, Q. Chen, D. Z. Chen, J. Wu, and J. Sun, \"Can a deep learning model be a sure bet for tabular prediction?\" in KDD, 2024, pp. 288-296. 3, 7, 8, 9, 12, 13", + "[66] A. Jeffares, T. Liu, J. Crabbé, F. Imrie, and M. van der Schaar, \"Tangos: Regularizing tabular neural networks through gradient orthogonalization and specialization,\" in ICLR, 2023. 3, 9, 10", + "[67] H. Ye, W. Fan, X. Song, S. Zheng, H. Zhao, D. dan Guo, and Y. Chang, \"Ptarl: Prototype-based tabular representation learning via space calibration,\" in ICLR, 2024. 3, 9, 10", + "[68] Y. Nader, L. Sixt, and T. Landgraf, \"DNNR: differential nearest neighbors regression,\" in ICML, 2022, pp. 16296-16317. 3, 7, 9, 10", + "[69] Y. Gorishniy, I. Rubachev, N. Kartashev, D. Shlenskii, A. Kotelnikov, and A. Babenko, \"Tabr: Tabular deep learning meets nearest neighbors in 2023,\" in ICLR, 2024. 3, 6, 7, 9, 10, 19", + "[70] G. Somepalli, A. Schwarzschild, M. Goldblum, C. B. Bruss, and T. Goldstein, \"SAINT: Improved neural networks for tabular data via row attention and contrastive pre-training,\" in NeurIPS Workshop, 2022. 
3, 7, 9, 10, 11, 13, 14", + "[71] I. Rubachev, A. Alekberov, Y. Gorishniy, and A. Babenko, \"Revisiting pretraining objectives for tabular deep learning,\" CoRR, vol. abs/2207.03208, 2022. 3, 7, 13, 14", + "[72] S. Onishi, K. Oono, and K. Hayashi, \"Tabret: Pre-training transformer-based tabular models for unseen columns,\" CoRR, vol. abs/2303.15747, 2023. 3, 9, 12, 15", + "[73] J. Shen, L. Li, L. M. Dery, C. Staten, M. Khodak, G. Neubig, and A. Talwalkar, \"Cross-modal fine-tuning: Align then refine,\" in ICML, 2023, pp. 31030-31056. 3, 9, 13, 15", + "[74] Y. Zhu, T. Brettin, F. Xia, A. Partin, M. Shukla, H. Yoo, Y. A. Evrard, J. H. Doroshow, and R. L. Stevens, \"Converting tabular data into images for deep learning with convolutional neural networks,\" Scientific Reports, vol. 11, no. 11325, 2021. 3, 4, 9, 17", + "[75] S. Lee and S.-C. Lee, \"Tableye: Seeing small tables through the lens of images,\" CoRR, vol. abs/2307.02491, 2023. 3, 9, 17", + "[76] A. Mamdouh, M. El-Melegy, S. Ali, and R. Kikinis, \"Tab2visual: Overcoming limited data in tabular data classification using deep learning with visual representations,\" CoRR, vol. abs/2502.07181, 2025.3,9,17", + "[77] Z. Wang and J. Sun, \"Transtab: Learning transferable tabular transformers across tables,\" in NeurIPS, 2022, pp. 2902-2915. 3, 9, 13, 16", + "[78] J. Yan, B. Zheng, H. Xu, Y. Zhu, D. Z. Chen, J. Sun, J. Wu, and J. Chen, \"Making pre-trained language models great on tabular prediction,\" in ICLR, 2024. 3, 6, 9, 16, 17", + "[79] C. Ye, G. Lu, H. Wang, L. Li, S. Wu, G. Chen, and J. Zhao, \"Towards cross-table masked pretraining for web data mining,\" in WWW, 2024, pp. 4449-4459. 3, 6, 9, 16", + "[80] S. Hegselmann, A. Buendia, H. Lang, M. Agrawal, X. Jiang, and D. Sontag, \"Tabllm: few-shot classification of tabular data with large language models,\" in AISTATS, 2023, pp. 5549-5581. 3, 9, 13, 16, 17", + "[81] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. 
Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 3, 6", + "[82] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing CAAFE for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 3, 9" + ], + "bbox": [ + 506, + 55, + 923, + 940 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 75, + 32, + 410, + 44 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] S. Han, J. Yoon, S. Ö. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 3, 9", + "[84] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024. 3, 17", + "[85] Y. Liang, H. Wen, Y. Nie, Y. Jiang, M. Jin, D. Song, S. Pan, and Q. Wen, \"Foundation models for time series analysis: A tutorial and survey,\" in SIGKDD, 2024, pp. 6555-6565. 3", + "[86] H.-J. Ye, Q.-L. Zhou, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Rethinking pre-training in tabular data: A neighborhood embedding perspective,\" CoRR, vol. abs/2311.00055, 2025. 3, 9, 18", + "[87] D. Bonet, D. M. Montserrat, X. G. i Nieto, and A. G. Ioannidis, \"Hyperfast: Instant classification for tabular data,\" in AAAI, 2024, pp. 11 114-11 123. 3, 7, 9, 18", + "[88] A. Müller, C. Curino, and R. Ramakrishnan, \"Mothernet: Fast training and inference via hyper-network transformers,\" in ICLR, 2025. 3, 8, 9, 18", + "[89] N. Hollmann, S. Müller, K. Eggensperger, and F. 
Hutter, \"Tabpfn: A transformer that solves small tabular classification problems in a second,\" in ICLR, 2023. 3, 6, 7, 8, 9, 10, 16, 18, 19", + "[90] V. Thomas, J. Ma, R. Hosseinzadeh, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Retrieval & fine-tuning for in-context tabular models,\" in NeurIPS, 2024, pp. 108439-108467. 3, 10, 19", + "[91] N. Hollmann, S. Müller, L. Purucker, A. Krishnakumar, M. Körfer, S. B. Hoo, R. T. Schirrmeister, and F. Hutter, \"Accurate predictions on small data with a tabular foundation model,\" Nature, vol. 637, no. 8045, pp. 319-326, 2025. 3, 9, 10, 18, 19, 20", + "[92] J. Gardner, J. C. Perdomo, and L. Schmidt, \"Large scale transfer learning for tabular data via language modeling,\" in NeurIPS, 2024, pp. 45155-45205. 3, 6, 9, 19", + "[93] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 3, 9, 17, 19", + "[94] X. Wen, S. Zheng, Z. Xu, Y. Sun, and J. Bian, \"Scalable in-context learning on tabular data via retrieval-augmented large language models,\" CoRR, vol. abs/2502.03147, 2025. 3, 9, 19", + "[95] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" CoRR, vol. abs/2410.24210, 2024. 3, 20, 21", + "[96] S.-Y. Liu and H.-J. Ye, \"Tabpfn unleashed: A scalable and effective solution to tabular classification problems,\" CoRR, vol. abs/2502.02527, 2025. 3, 17, 19, 20", + "[97] J. Svirsky and O. Lindenbaum, \"Interpretable deep clustering for tabular data,\" in ICML, 2024, pp. 47314-47330. 3, 20", + "[98] H. T. Rauf, A. Freitas, and N. W. Paton, \"Tabledc: Deep clustering for tabular data,\" CoRR, vol. abs/2405.17723, 2024. 3, 20", + "[99] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" in NeurIPS, 2022, pp. 32142-32159. 3, 20", + "[100] T. Shenkar and L. 
Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 3", + "[101] J. Yin, Y. Qiao, Z. Zhou, X. Wang, and J. Yang, \"MCM: masked cell modeling for anomaly detection in tabular data,\" in ICLR, 2024. 3", + "[102] L. Hansen, N. Seedat, M. van der Schaar, and A. Petrovic, \"Reimagining synthetic tabular data generation through data-centric AI: A comprehensive benchmark,\" in NeurIPS, 2023, pp. 33781-33823. 3", + "[103] C. Hou, S. Gu, C. Xu, and Y. Qian, \"Incremental learning for simultaneous augmentation of feature and class,\" IEEE Transactions on pattern analysis and machine intelligence, vol. 45, no. 12, pp. 14789-14806, 2023. 3", + "[104] M. Vero, M. Balunovic, and M. T. Vechev, \"Cuts: Customizable tabular synthetic data generation,\" in ICML, 2024, pp. 49408-49433. 3", + "[105] S. Ö. Arik and T. Pfister, \"Tabnet: Attentive interpretable tabular learning,\" in AAAI, 2021, pp. 6679-6687. 3, 7, 8, 9, 12, 20", + "[106] P. Hager, M. J. Menten, and D. Rueckert, \"Best of both worlds: Multimodal contrastive learning with tabular and imaging data,\" in CVPR, 2023, pp. 23924-23935. 3, 7, 21", + "[107] J.-P. Jiang, H.-J. Ye, L. Wang, Y. Yang, Y. Jiang, and D.-C. Zhan, \"Tabular insights, visual impacts: Transferring expertise from tables to images,\" in ICML, 2024, pp. 21988-22009. 3, 7, 21" + ], + "bbox": [ + 76, + 54, + 493, + 941 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[108] Y. Diao, Y. Yang, Q. Li, B. He, and M. Lu, \"Oebench: Investigating open environment challenges in real-world relational data streams,\" VLDB, vol. 17, no. 6, pp. 1283-1296, 2024. 3", + "[109] I. Rubachev, N. Kartashev, Y. Gorishniy, and A. Babenko, \"Tabred: A benchmark of tabular machine learning in-the-wild,\" CoRR, vol. abs/2406.19380, 2024. 3, 6, 8, 20, 21", + "[110] J. Gardner, Z. Popovic, and L. Schmidt, \"Benchmarking distribution shift in tabular data with tableshift,\" in NeurIPS, 2024, pp. 
53385-53432. 3, 20, 21", + "[111] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 2024. 3", + "[112] N. Jin, J. Siebert, D. Li, and Q. Chen, \"A survey on table question answering: recent advances,\" in CCKS, 2022, pp. 174-186. 3, 21", + "[113] X. Fang, W. Xu, F. A. Tan, J. Zhang, Z. Hu, Y. Qi, S. Nickleach, D. Socolinsky, S. Sengamedu, and C. Faloutsos, \"Large language models (llms) on tabular data: Prediction, generation, and understanding-a survey,\" CoRR, vol. abs/2402.17944, 2024. 3, 21", + "[114] C. Winship and R. D. Mare, \"Regression models with ordinal variables,\" American sociological review, vol. 49, no. 4, pp. 512-525, 1984. 3", + "[115] P. A. Gutierrez, M. Perez-Ortiz, J. Sánchez-Monedero, F. Fernández-Navarro, and C. Hervás-Martínez, \"Ordinal regression methods: Survey and experimental study,\" IEEE Trans. Knowl. Data Eng., vol. 28, no. 1, pp. 127-146, 2016. 3", + "[116] A. Jeffares, A. Curth, and M. van der Schaar, \"Deep learning through A telescoping lens: A simple model provides empirical insights on grokking, gradient boosting & beyond,\" in NeurIPS, 2024, pp. 123-498-123-533. 4", + "[117] G. Cormode, P. Indyk, N. Koudas, and S. Muthukrishnan, \"Fast mining of massive tabular data via approximate distance computations,\" in ICDE, 2002, pp. 605-614. 4", + "[118] M. D. Adelfio and H. Samet, \"Schema extraction for tabular data on the web,\" VLDB, vol. 6, no. 6, pp. 421-432, 2013. 4", + "[119] J. F. Arias, A. K. Chhabra, and V. Misra, \"Efficient interpretation of tabular documents,\" in ICPR, 1996, pp. 681-685. 4", + "[120] H.-L. Wang, S.-H. Wu, K. K. Wang, C.-L. Sung, W.-L. Hsu, and W.-K. Shih, \"Semantic search on internet tabular information extraction for answering queries,\" in CIKM, 2000, pp. 243-249. 4", + "[121] M.-J. Nederhof, \"An optimal tabular parsing algorithm,\" in ACL, 1994, pp. 117-124. 4", + "[122] J. F. Arias, A. K. Chhabra, and V. 
Misra, \"Interpreting and representing tabular documents,\" in CVPR, 1996, pp. 600-605. 4", + "[123] G. Richards and V. J. Rayward-Smith, \"Discovery of association rules in tabular data,\" in ICDM, 2001, pp. 465-472. 4", + "[124] J. R. Quinlan, \"Induction of decision trees,\" Machine learning, vol. 1, pp. 81-106, 1986. 4", + "[125] L. Breiman, J. Friedman, R. Olshen, and C. J. Stone, Classification and Regression Trees. Chapman and Hall/CRC, 1984. 4", + "[126] Y. Freund and R. E. Schapire, “A desicion-theoretic generalization of on-line learning and an application to boosting,” in EuroCOLT, 1995, pp. 23-37. 4, 19", + "[127] L. Breiman, \"Random forests,\" Machine Learning, vol. 45, no. 1, pp. 5-32, 2001. 4, 19", + "[128] J. H. Friedman, \"Greedy function approximation: a gradient boosting machine,\" Annals of statistics, pp. 1189-1232, 2001. 4", + "[129] ——, \"Stochastic gradient boosting,\" Computational statistics & data analysis, vol. 38, no. 4, pp. 367-378, 2002. 4", + "[130] T. Chen and C. Guestrin, \"Xgboost: A scalable tree boosting system,\" in KDD, 2016, pp. 785-794. 4, 8, 18, 20", + "[131] G. Ke, Q. Meng, T. Finley, T. Wang, W. Chen, W. Ma, Q. Ye, and T.-Y. Liu, \"Lightgbm: A highly efficient gradient boosting decision tree,\" in NIPS, 2017, pp. 3146-3154. 4, 8, 20", + "[132] L. O. Prokhorenkova, G. Gusev, A. Vorobev, A. V. Dorogush, and A. Gulin, \"Catboost: unbiased boosting with categorical features,\" in NeurIPS, 2018, pp. 6639-6649. 4, 8", + "[133] D. Nielsen, \"Tree boosting with xgboost-why does xgboost win \"every\" machine learning competition?\" Master's thesis, NTNU, 2016. 4", + "[134] S. Makridakis, E. Spiliotis, and V. Assimakopoulos, \"M5 accuracy competition: Results, findings, and conclusions,\" International Journal of Forecasting, vol. 38, no. 4, pp. 1346-1364, 2022. 4", + "[135] H. Larochelle, D. Erhan, A. Courville, J. Bergstra, and Y. 
Bengio, \"An empirical evaluation of deep architectures on problems with many factors of variation,\" in ICML, 2007, pp. 473-480. 4" + ], + "bbox": [ + 506, + 55, + 923, + 941 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 75, + 32, + 410, + 44 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[136] R. Salakhutdinov and G. Hinton, \"Learning a nonlinear embedding by preserving class neighbourhood structure,\" in AISTATS, 2007, pp. 412-419. 4", + "[137] R. Min, D. A. Stanley, Z. Yuan, A. Bonner, and Z. Zhang, “A deep non-linear feature mapping for large-margin knn classification,” in ICDM, 2009, pp. 357-366. 4", + "[138] M. Ahmed, A. N. Mahmood, and J. Hu, \"A survey of network anomaly detection techniques,\" Journal of Network and Computer Applications, vol. 60, pp. 19-31, 2016. 4", + "[139] L. Lu, M. Medo, C. H. Yeung, Y.-C. Zhang, Z.-K. Zhang, and T. Zhou, \"Recommender systems,\" Physics reports, vol. 519, no. 1, pp. 1-49, 2012. 4", + "[140] D. Salinas, V. Flunkert, J. Gasthaus, and T. Januschowski, \"Deepar: Probabilistic forecasting with autoregressive recurrent networks,\" International journal of forecasting, vol. 36, no. 3, pp. 1181-1191, 2020. 4", + "[141] T.-J. Huang, X.-Y. Chen, and H.-J. Ye, \"Seqfusion: Sequential fusion of pre-trained models for zero-shot time-series forecasting,\" CoRR, vol. abs/2503.02836, 2025. 4", + "[142] Q. Liu, F. Yu, S. Wu, and L. Wang, \"A convolutional click prediction model,\" in CIKM, 2015, pp. 1743-1746. 4", + "[143] H. Guo, R. Tang, Y. Ye, Z. Li, and X. He, \"Deepfm: A factorization-machine based neural network for CTR prediction,\" in IJCAI, 2017, pp. 1725-1731. 4", + "[144] S. Somvanshi, S. Das, S. A. Javed, G. Antariksa, and A. 
Hossain, \"A survey on deep tabular learning,\" CoRR, vol. abs/2410.12034, 2024. 4", + "[145] D. Lane, D. Scott, M. Hebl, R. Guerra, D. Osherson, and H. Zimmer, Introduction to statistics. CiteSeer, 2003. 4", + "[146] A. F. Karr, A. P. Sanil, and D. L. Banks, \"Data quality: A statistical perspective,\" Statistical Methodology, vol. 3, no. 2, pp. 137-173, 2006. 4", + "[147] A. Sánchez-Morales, J.-L. Sancho-Gómez, J.-A. Martínez-García, and A. R. Figueiras-Vidal, \"Improving deep learning performance with missing values via deletion and compensation,\" Neural Computing and Applications, vol. 32, pp. 13233-13244, 2020. 4", + "[148] D. Chicco, L. Oneto, and E. Tavazzi, \"Eleven quick tips for data cleaning and feature engineering,\" PLOS Computational Biology, vol. 18, no. 12, p. e1010718, 2022. 4", + "[149] Y. Luo, M. Wang, H. Zhou, Q. Yao, W.-W. Tu, Y. Chen, W. Dai, and Q. Yang, \"Autocross: Automatic feature crossing for tabular data in real-world applications,\" in KDD, 2019, pp. 1936-1945. 4", + "[150] H. He and E. A. Garcia, \"Learning from imbalanced data,\" IEEE Transactions on knowledge and data engineering, vol. 21, no. 9, pp. 1263-1284, 2009. 5", + "[151] H. He and Y. Ma, Imbalanced learning: foundations, algorithms, and applications. John Wiley & Sons, 2013. 5", + "[152] T. Lin, P. Goyal, R. B. Girshick, K. He, and P. Dollar, \"Focal loss for dense object detection,\" in ICCV, 2017, pp. 2999-3007. 5", + "[153] J. M. Johnson and T. M. Khoshgoftaar, \"Survey on deep learning with class imbalance,\" Journal of big data, vol. 6, no. 1, pp. 1-54, 2019. 5", + "[154] J. Engelmann and S. Lessmann, \"Conditional Wasserstein gan-based oversampling of tabular data for imbalanced learning,\" Expert Systems with Applications, vol. 174, p. 114582, 2021. 5", + "[155] R. Sauber-Cole and T. M. Khoshgoftaar, \"The use of generative adversarial networks to alleviate class imbalance in tabular data: a survey,\" Journal of Big Data, vol. 9, no. 1, p. 98, 2022. 
5, 21", + "[156] X.-Y. Liu, J. Wu, and Z.-H. Zhou, \"Exploratory undersampling for class-imbalance learning,\" IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550, 2008. 5", + "[157] N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer, \"SMOTE: synthetic minority over-sampling technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002. 5", + "[158] A. Fernández, S. García, F. Herrera, and N. V. Chawla, \"SMOTE for learning from imbalanced data: Progress and challenges, marking the 15-year anniversary,\" Journal of Artificial Intelligence Research, vol. 61, pp. 863-905, 2018. 5", + "[159] K. Cao, C. Wei, A. Gaidon, N. Arechiga, and T. Ma, \"Learning imbalanced datasets with label-distribution-aware margin loss,\" in NeurIPS, 2019, pp. 1567-1578. 5", + "[160] Y. Cui, M. Jia, T.-Y. Lin, Y. Song, and S. Belongie, \"Class-balanced loss based on effective number of samples,\" in CVPR, 2019, pp. 9268-9277. 5", + "[161] Y. Xie, Z. Wang, Y. Li, B. Ding, N. M. Gurel, C. Zhang, M. Huang, W. Lin, and J. Zhou, \"Fives: Feature interaction via edge search for large-scale tabular data,\" in SIGKDD, 2021, pp. 3795-3805. 5" + ], + "bbox": [ + 76, + 55, + 493, + 941 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[162] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 5", + "[163] A. Klein and F. Hutter, \"Tabular benchmarks for joint architecture and hyperparameter optimization,\" CoRR, vol. abs/1905.04970, 2019. 5", + "[164] P. Pokhrel, \"A comparison of automl hyperparameter optimization tools for tabular data,\" Ph.D. dissertation, Youngstown State University, 2023. 5", + "[165] F. Hutter, L. Kotthoff, and J. Vanschoren, Automated machine learning: methods, systems, challenges. Springer Nature, 2019. 5", + "[166] X. He, K. Zhao, and X. 
Chu, \"Automl: A survey of the state-of-the-art,\" Knowledge-based systems, vol. 212, p. 106622, 2021. 5", + "[167] M. Feurer, K. Eggensperger, S. Falkner, M. Lindauer, and F. Hutter, \"Auto-sklearn 2.0: Hands-free automl via meta-learning,\" Journal of Machine Learning Research, vol. 23, no. 261, pp. 1-61, 2022. 5", + "[168] C. Mennella, U. Maniscalco, G. De Pietro, and M. Esposito, \"Ethical and regulatory challenges of ai technologies in healthcare: A narrative review,\" Heliyon, vol. 10, no. 4, 2024. 5", + "[169] W. Moore and S. Frye, \"Review of hipaa, part 1: history, protected health information, and privacy and security rules,\" Journal of nuclear medicine technology, vol. 47, no. 4, pp. 269-272, 2019. 5", + "[170] D. F. Sittig and H. Singh, \"Legal, ethical, and financial dilemmas in electronic health record adoption and use,\" Pediatrics, vol. 127, no. 4, pp. e1042-e1047, 2011. 5", + "[171] J. Amann, A. Blasimme, E. Vayena, D. Frey, V. I. Madai, and P. Consortium, \"Explainability for artificial intelligence in healthcare: a multidisciplinary perspective,\" BMC medical informatics and decision making, vol. 20, pp. 1-9, 2020. 5", + "[172] B. S. Caffo, F. A. D'Asaro, A. Garcez, and E. Raffinetti, \"Explainable artificial intelligence models and methods in finance and healthcare,\" p. 970246, 2022. 5", + "[173] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger, \"On calibration of modern neural networks,\" in ICML, 2017, pp. 1321-1330. 5", + "[174] K. Helli, D. Schnurr, N. Hollmann, S. Müller, and F. Hutter, \"Drift-resilient tabpfn: In-context learning temporal distribution shifts on tabular data,\" in NeurIPS, 2024, pp. 98742-98781. 5, 21", + "[175] J. Demsr, \"Statistical comparisons of classifiers over multiple data sets,\" Journal of Machine Learning Research, vol. 7, pp. 1-30, 2006. 5", + "[176] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" in ICLR, 2025. 5, 19", + "[177] M. E. 
Glickman and A. C. Jones, \"Rating the chess rating system,\" CHANCE-BERLIN THEN NEW YORK-, vol. 12, pp. 21-28, 1999. 5", + "[178] L. M. Hvattum and H. Arntzen, \"Using elo ratings for match result prediction in association football,\" International Journal of forecasting, vol. 26, no. 3, pp. 460-470, 2010. 5", + "[179] J. Ma, V. Thomas, R. Hosseinzadeh, H. Kamkari, A. Labach, J. C. Cresswell, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Tabdpt: Scaling tabular foundation models,\" CoRR, vol. abs/2410.18164, 2024. 6, 18, 19", + "[180] A. Tschalzev, L. Purucker, S. Lüdtke, F. Hutter, C. Bartelt, and H. Stuckenschmidt, \"Unreflected use of tabular data repositories can undermine research quality,\" in ICLR Workshop, 2025. 6, 7", + "[181] S. B. Rabbani, I. V. Medri, and M. D. Samad, \"Attention versus contrastive learning of tabular data - A data-centric benchmarking,\" CoRR, vol. abs/2401.04266, 2024. 6", + "[182] Y. Yang, Y. Wang, G. Liu, L. Wu, and Q. Liu, \"Unitabe: A universal pretraining protocol for tabular foundation model in data science,\" in ICLR, 2024. 6, 9, 16", + "[183] G. Eggert, K. Huo, M. Biven, and J. Waugh, \"Tablib: A dataset of 627m tables with context,\" CoRR, vol. abs/2310.07875, 2023. 6", + "[184] H. W. Jian Yang, Xuefeng Li, \"DeepTables: A Deep Learning Python Package for Tabular Data,\" https://github.com/DataCanvasIO/DeepTables, 2022, version 0.2.x.6", + "[185] N. Erickson, J. Mueller, A. Shirkov, H. Zhang, P. Larroy, M. Li, and A. Smola, \"Autogluon-tabular: Robust and accurate automl for structured data,\" CoRR, vol. abs/2003.06505, 2020. 6", + "[186] M. Joseph, \"Pytorch tabular: A framework for deep learning with tabular data,\" CoRR, vol. abs/2104.13638, 2021. 6", + "[187] J. R. Zaurin and P. Mulinka, \"pytorch-widedeep: A flexible package for multimodal deep learning,\" Journal of Open Source Software, vol. 8, no. 86, p. 5027, Jun. 2023. 
6" + ], + "bbox": [ + 506, + 55, + 923, + 941 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 32, + 410, + 44 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[188] S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and H.-J. Ye, \"TALENT: A tabular analytics and learning toolbox,\" CoRR, vol. abs/2407.04057, 2024. 6", + "[189] T. Akiba, S. Sano, T. Yanase, T. Ohta, and M. Koyama, \"Optuna: A next-generation hyperparameter optimization framework,\" in KDD, 2019, pp. 2623-2631. 6", + "[190] N. Morgan and H. Bourlard, \"Generalization and parameter estimation in feedforward nets: Some experiments,\" in NeuIPS, 1989, pp. 630-637. 7", + "[191] S. Arlot and A. Celisse, \"A survey of cross-validation procedures for model selection,\" CoRR, vol. abs/0907.4728, 2009. 7", + "[192] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, T.-W. Chen, and T.-H. Chang, \"Prompt: Towards a better deep neural network for tabular data,\" in ICML, 2023, pp. 4392-4434. 7, 9, 10", + "[193] S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"GRANDE: gradient-based decision tree ensembles for tabular data,\" in ICLR, 2024. 7, 8, 9, 12, 19", + "[194] X. Jiang, A. Margeloiu, N. Simidjievski, and M. Jamnik, \"Protogate: Prototype-based neural networks with global-to-local feature selection for tabular biomedical data,\" in ICML, 2024, pp. 21844-21878. 7", + "[195] G. C. Cawley and N. L. C. Talbot, \"On over-fitting in model selection and subsequent selection bias in performance evaluation,\" Journal of Machine Learning Research, vol. 11, pp. 2079-2107, 2010. 7", + "[196] T. G. Dietterich, \"Approximate statistical tests for comparing supervised classification learning algorithms,\" Neural Computation, vol. 10, no. 7, pp. 1895-1923, 1998. 7", + "[197] S. 
Raschka, \"Model evaluation, model selection, and algorithm selection in machine learning,\" CoRR, vol. abs/1811.12808, 2018. 7", + "[198] H. Schulz-Kumpel, S. Fischer, T. Nagler, A. Boulesteix, B. Bischl, and R. Hornung, \"Constructing confidence intervals for 'the' generalization error - a comprehensive benchmark study,\" CoRR, vol. abs/2409.18836, 2024. 7", + "[199] T. Nagler, L. Schneider, B. Bischl, and M. Feurer, \"Reshuffling resampling splits can improve generalization of hyperparameter optimization,\" in NeurIPS, 2024. 7", + "[200] J. Feng, Y. Yu, and Z. Zhou, \"Multi-layered gradient boosting decision trees,\" in NeurIPS, 2018, pp. 3555-3565. 7", + "[201] I. Padhi, Y. Schiff, I. Melnyk, M. Rigotti, Y. Mroueh, P. Dognin, J. Ross, R. Nair, and E. Altman, \"Tabular transformers for modeling multivariate time series,\" in ICASSP, 2021, pp. 3565-3569. 7", + "[202] F. Di Martino and F. Delmastro, \"Explainable ai for clinical and remote health applications: a survey on tabular and time series data,\" Artificial Intelligence Review, vol. 56, no. 6, pp. 5261-5315, 2023. 7", + "[203] G. M. Van de Ven, T. Tuytelaars, and A. S. Tolias, \"Three types of incremental learning,\" Nature Machine Intelligence, vol. 4, no. 12, pp. 1185-1197, 2022. 7", + "[204] D.-W. Zhou, Q.-W. Wang, Z.-H. Qi, H.-J. Ye, D.-C. Zhan, and Z. Liu, \"Class-incremental learning: A survey,\" IEEE transactions on pattern analysis and machine intelligence, vol. 46, no. 12, pp. 9851-9873, 2024. 7", + "[205] J. Yosinski, J. Clune, Y. Bengio, and H. Lipson, \"How transferable are features in deep neural networks?\" in NIPS, vol. 27, 2014. 7", + "[206] S. U. H. Dar, M. Özbey, A. B. Çatlı, and T. Çukur, \"A transfer-learning approach for accelerated mri using deep neural networks,\" Magnetic resonance in medicine, vol. 84, no. 2, pp. 663-685, 2020. 7", + "[207] Y. Cao, Z. Fang, Y. Wu, D.-X. Zhou, and Q. Gu, \"Towards understanding the spectral bias of deep learning,\" CoRR, vol. abs/1912.01198, 2019. 
7", + "[208] R. Basri, M. Galun, A. Geifman, D. Jacobs, Y. Kasten, and S. Kritchman, \"Frequency bias in neural networks for input of non-uniform density,\" in ICML, 2020, pp. 685-694. 7", + "[209] F. Matteucci, V. Arzamasov, and K. Böhm, \"A benchmark of categorical encoders for binary classification,\" in NeurIPS, 2023, pp. 54855-54875. 8", + "[210] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in SIGKDD, 2024, pp. 3679-3689. 8", + "[211] M. Pang, K. M. Ting, P. Zhao, and Z. Zhou, \"Improving deep forest by screening,\" IEEE Transactions on Knowledge and Data Engineering., vol. 34, no. 9, pp. 4298-4312, 2022. 8", + "[212] M. T. Ribeiro, S. Singh, and C. Guestrin, \"why should I trust you?: Explaining the predictions of any classifier,\" in KDD, 2016, pp. 1135-1144. 8" + ], + "bbox": [ + 76, + 54, + 491, + 941 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[213] S. M. Lundberg and S. Lee, “A unified approach to interpreting model predictions,” in NIPS, 2017, pp. 4765-4774. 8", + "[214] Z.-H. Zhou and J. Feng, \"Deep forest,\" National science review, vol. 6, no. 1, pp. 74-86, 2019. 8", + "[215] Y. Cheng, R. Hu, H. Ying, X. Shi, J. Wu, and W. Lin, \"Arithmetic feature interaction is necessary for deep tabular learning,\" in AAAI, 2024, pp. 11516-11524. 9, 12, 13", + "[216] J. Kossen, N. Band, C. Lyle, A. N. Gomez, T. Rainforth, and Y. Gal, \"Self-attention between datapoints: Going beyond individual input-output pairs in deep learning,\" in NeurIPS, 2021, pp. 28742-28756. 9", + "[217] B. Schäfl, L. Gruber, A. Bitto-Nemling, and S. Hochreiter, \"Hop- ular: Modern hopfield networks for tabular data,\" CoRR, vol. abs/2206.00664, 2022. 9, 10", + "[218] H. Kim, A. Mnih, J. Schwarz, M. Garnelo, S. M. A. Eslami, D. Rosenbaum, O. Vinyals, and Y. W. Teh, \"Attentive neural processes,\" in ICLR, 2019. 9, 10", + "[219] I. 
Shavitt and E. Segal, \"Regularization learning networks: deep learning for tabular datasets,\" in NeurIPS, 2018, pp. 1386-1396. 9, 10", + "[220] V. Verma, T. Luong, K. Kawaguchi, H. Pham, and Q. V. Le, \"Towards domain-agnostic contrastive learning,\" in ICML, 2021, pp. 10530-10541. 9, 14", + "[221] C. Lee, F. Imrie, and M. van der Schaar, \"Self-supervision enhanced feature selection with correlated gates,\" in ICLR, 2022. 9, 14", + "[222] R. Levin, V. Cherepanova, A. Schwarzschild, A. Bansal, C. B. Bruss, T. Goldstein, A. G. Wilson, and M. Goldblum, \"Transfer learning with deep tabular models,\" in ICLR, 2023. 9, 13, 14, 15", + "[223] K. Majmundar, S. Goyal, P. Netrapalli, and P. Jain, \"MET: masked encoding for tabular data,\" CoRR, vol. abs/2206.08564, 2022. 9, 14", + "[224] E. Hajiramezanali, N. L. Diamant, G. Scalia, and M. W. Shen, \"Stab: Self-supervised learning for tabular data,\" in NeurIPS Workshop, 2022. 9, 14", + "[225] S. Chen, J. Wu, N. Hovakimyan, and H. Yao, \"Recontab: Regularized contrastive representation learning for tabular data,\" CoRR, vol. abs/2310.18541, 2023. 9, 14", + "[226] W.-W. Du, W.-Y. Wang, and W.-C. Peng, \"Dora: Domain-based self-supervised learning framework for low-resource real estate appraisal,\" in CIKM, 2023, pp. 4552-4558. 9, 14", + "[227] Y. Sui, T. Wu, J. C. Cresswell, G. Wu, G. Stein, X. S. Huang, X. Zhang, and M. Volkovs, \"Self-supervised representation learning from random data projectors,\" in ICLR, 2024. 9, 14", + "[228] T. Iwata and A. Kumagai, \"Meta-learning from tasks with heterogeneous attribute spaces,\" in NeurIPS, 2020, pp. 6053-6063. 9, 13, 15", + "[229] L. Liu, M. M. Fard, and S. Zhao, \"Distribution embedding networks for generalization from a diverse set of classification tasks,\" Transactions on Machine Learning Research, 2022. 9, 15", + "[230] B. Zhu, X. Shi, N. Erickson, M. Li, G. Karypis, and M. Shoaran, \"Xtab: Cross-table pretraining for tabular transformers,\" in ICML, 2023, pp. 43181-43204. 
9, 12, 13, 15", + "[231] Y. Zhang, K. Gong, K. Zhang, H. Li, Y. Qiao, W. Ouyang, and X. Yue, \"Meta-transformer: A unified framework for multimodal learning,\" CoRR, vol. abs/2307.10802, 2023. 9, 15", + "[232] G. Liu, J. Yang, and L. Wu, \"Ptab: Using the pre-trained language model for modeling tabular data,\" CoRR, vol. abs/2209.08060, 2022. 9, 16", + "[233] M. J. Kim, L. Grinsztajn, and G. Varoquaux, \"CARTE: pretraining and transfer for tabular learning,\" in ICML, 2024, pp. 23843-23866. 9, 16, 17", + "[234] Z. Cheng, T. Xie, P. Shi, C. Li, R. Nadkarni, Y. Hu, C. Xiong, D. Radev, M. Ostendorf, L. Zettlemoyer, N. A. Smith, and T. Yu, \"Binding language models in symbolic languages,\" in ICLR, 2023. 9, 16", + "[235] T. Zhang, S. Wang, S. Yan, L. Jian, and Q. Liu, \"Generative table pre-training empowers models for tabular prediction,\" in EMNLP, 2023. 9, 16", + "[236] T. Dinh, Y. Zeng, R. Zhang, Z. Lin, M. Gira, S. Rajput, J. yong Sohn, D. S. Papailiopoulos, and K. Lee, \"LIFT: language-interfaced fine-tuning for non-language machine learning tasks,\" in NeurIPS, 2022, pp. 11763-11784. 9, 16", + "[237] R. Wang, Z. Wang, and J. Sun, \"Unipredict: Large language models are universal tabular predictors,\" CoRR, vol. abs/2310.03266, 2023. 9, 16", + "[238] A. Sharma, E. Vans, D. Shigemizu, K. A. Boroevich, and T. Tsunoda, \"Deepinsight: A methodology to transform a non-image" + ], + "bbox": [ + 506, + 55, + 923, + 941 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 75, + 32, + 410, + 44 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "data to an image for convolution neural network architecture,\" Scientific reports, vol. 9, no. 1, p. 11399, 2019. 9, 17", + "[239] O. Bazgir, R. Zhang, S. R. Dhruba, R. Rahman, S. Ghosh, and R. 
Pal, \"Representation of features as images with neighborhood dependencies for compatibility with convolutional neural networks,\" Nature communications, vol. 11, no. 1, p. 4391, 2020. 9, 17", + "[240] L. Buturovic and D. Miljkovic, \"A novel method for classification of tabular data using convolutional neural networks,\" BioRxiv, pp. 2020-05, 2020. 9, 17", + "[241] V. Gómez-Martínez, F. J. Lara-Abelenda, P. Peiro-Corbacho, D. Chushig-Muzo, C. Granja, and C. Soguero-Ruiz, \"LM-IGTD: a 2d image generator for low-dimensional and mixed-type tabular data to leverage the potential of convolutional neural networks,\" CoRR, vol. abs/2406.14566, 2024. 9, 17", + "[242] B. Sun, L. Yang, W. Zhang, M. Lin, P. Dong, C. Young, and J. Dong, \"Supertml: Two-dimensional word embedding for the precognition on structured tabular data,\" in CVPR Workshops, 2019. 9, 17", + "[243] Z. Wang, C. Gao, C. Xiao, and J. Sun, \"Meditab: Scaling medical tabular data predictors via data consolidation, enrichment, and refinement,\" in *IJCAI*, 2024, pp. 6062-6070. 9, 19", + "[244] R. Bommasani, D. A. Hudson, E. Adeli, R. Altman, S. Arora, S. von Arx, M. S. Bernstein, J. Bohg, A. Bosselut, E. Brunskill et al., \"On the opportunities and risks of foundation models,\" CoRR, vol. abs/2108.07258, 2021. 8", + "[245] J. Goldberger, G. E. Hinton, S. Roweis, and R. R. Salakhutdinov, \"Neighbourhood components analysis,\" in NIPS, vol. 17, 2004. 10", + "[246] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei, \"Language models are few-shot learners,\" in NeurIPS, 2020, pp. 1877-1901. 10, 21", + "[247] R. 
Tibshirani, \"Regression shrinkage and selection via the lasso,\" Journal of the Royal Statistical Society Series B: Statistical Methodology, vol. 58, no. 1, pp. 267-288, 1996. 10", + "[248] A. E. Hoerl and R. W. Kennard, \"Ridge regression: Biased estimation for nonorthogonal problems,\" Technometrics, vol. 12, no. 1, pp. 55-67, 1970. 10", + "[249] H. Zou and T. Hastie, “Zou h, hastie t. regularization and variable selection via the elastic net.” Journal of the Royal Statistical Society: Series B (Statistical Methodology), vol. 67, pp. 301–320, 2005. 10", + "[250] J. T. Hancock and T. M. Khoshgoftaar, \"Survey on categorical data for neural networks,\" Journal of big data, vol. 7, no. 1, p. 28, 2020. 11", + "[251] J. R. Quinlan, C4.5: programs for machine learning. Elsevier, 2014. 12", + "[252] L. Breiman, \"Random forests,\" Machine learning, vol. 45, pp. 5-32, 2001. 12", + "[253] Z.-H. Zhou and Y. Jiang, \"Nec4. 5: Neural ensemble based c4. 5,\" IEEE Transactions on knowledge and data engineering, vol. 16, no. 6, pp. 770-773, 2004. 12, 14, 20", + "[254] T. Hastie and R. Tibshirani, \"Generalized additive models,\" Statistical science, vol. 1, no. 3, pp. 297-310, 1986. 12", + "[255] R. Agarwal, L. Melnick, N. Frosst, X. Zhang, B. Lengerich, R. Caruana, and G. E. Hinton, \"Neural additive models: Interpretable machine learning with neural nets,\" in NeurIPS, 2021, pp. 4699-4711. 12, 20", + "[256] W.-Y. Wang, W.-W. Du, D. Xu, W. Wang, and W.-C. Peng, \"A survey on self-supervised learning for non-sequential tabular data,\" Machine Learning, vol. 114, no. 1, p. 16, 2025. 13, 14", + "[257] G. Hinton, O. Vinyals, and J. Dean, \"Distilling the knowledge in a neural network,\" CoRR, vol. abs/1503.02531, 2015. 14", + "[258] S. Yun, D. Han, S. Chun, S. J. Oh, Y. Yoo, and J. Choe, \"Cutmix: Regularization strategy to train strong classifiers with localizable features,\" in ICCV, 2019, pp. 6023-6032. 14", + "[259] H. Zhang, M. Cisse, Y. N. Dauphin, and D. 
Lopez-Paz, \"mixup: Beyond empirical risk minimization,\" in ICLR, 2018. 14", + "[260] C. Hou and Z.-H. Zhou, \"One-pass learning with incremental and decremental features,\" IEEE transactions on pattern analysis and machine intelligence, vol. 40, no. 11, pp. 2776-2792, 2017. 15", + "[261] H.-J. Ye, D.-C. Zhan, Y. Jiang, and Z.-H. Zhou, \"Rectify heterogeneous models with semantic mapping,\" in ICML, 2018, pp. 5630-5639. 15", + "[262] H.-J. Ye, L. Han, and D.-C. Zhan, \"Revisiting unsupervised meta-learning via the characteristics of few-shot tasks,\" IEEE" + ], + "bbox": [ + 76, + 54, + 491, + 941 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 3, pp. 3721-3737, 2022. 15", + "[263] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, and V. Stoyanov, \"Roberta: A robustly optimized bert pretraining approach,\" CoRR, vol. abs/1907.11692, 2019. 16", + "[264] F. Mahdisoltani, J. Biega, and F. M. Suchanek, \"YAGO3: A knowledge base from multilingual wikipediais,\" in CIDR, 2015. 16", + "[265] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing caafe for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 16", + "[266] S. Han, J. Yoon, S. O. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 16", + "[267] J. Herzig, P. K. Nowak, T. Müller, F. Piccinno, and J. M. Eisenschlos, \"Tapas: Weakly supervised table parsing via pre-training,\" in ACL, 2020, pp. 4320-4333. 16", + "[268] P. Yin, G. Neubig, W. tau Yih, and S. Riedel, \"Tabert: Pretraining for joint understanding of textual and tabular data,\" in ACL, 2020, pp. 8413-8426. 16", + "[269] M. Chen, L. Shen, Z. Li, X. J. Wang, J. Sun, and C. 
Liu, \"Visions: Visual masked autoencoders are free-lunch zero-shot time series forecasters,\" CoRR, vol. abs/2408.17253, 2024. 16", + "[270] Z. Li, S. Li, and X. Yan, \"Time series as images: Vision transformer for irregularly sampled time series,\" in NeurIPS, 2023, pp. 49 187-49 204. 16", + "[271] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dólar, and R. B. Girshick, \"Segment anything,\" in ICCV, 2023, pp. 3992-4003. 17", + "[272] D. Ha, A. M. Dai, and Q. V. Le, \"Hypernetworks,\" in ICLR, 2017. 18", + "[273] W.-L. Chao, H.-J. Ye, D.-C. Zhan, M. E. Campbell, and K. Q. Weinberger, “Revisiting meta-learning as supervised learning,” CoRR, vol. abs/2002.00573, 2020. 18", + "[274] J. Peters, D. Janzing, and B. Scholkopf, Elements of causal inference: foundations and learning algorithms. The MIT Press, 2017. 18", + "[275] R. Neal, Bayesian Learning for Neural Networks, ser. Incs. springer, 1996. 18", + "[276] S. Müller, N. Hollmann, S. Pineda-Arango, J. Grabocka, and F. Hutter, \"Transformers can do bayesian inference,\" in ICLR, 2022. 18", + "[277] H.-J. Ye, S.-Y. Liu, and W.-L. Chao, \"A closer look at tabpfn v2: Strength, limitation, and extension,\" CoRR, vol. abs/2502.17361, 2025. 18", + "[278] T. Iwata and A. Kumagai, \"Meta-learning of semi-supervised learning from tasks with heterogeneous attribute spaces,\" CoRR, vol. abs/2311.05088, 2023. 18", + "[279] T. Nagler, \"Statistical foundations of prior-data fitted networks,\" in ICML, A. Krause, E. Brunskill, K. Cho, B. Engelhardt, S. Sabato, and J. Scarlett, Eds., 2023, pp. 25660-25676. 18", + "[280] J. Ma, A. Dankar, G. Stein, G. Yu, and A. L. Caterini, \"Tabpfgen - tabular data generation with tabpfn,\" CoRR, vol. abs/2406.05216, 2024. 18", + "[281] S. Ruiz-Villafranca, J. R. Gómez, J. M. C. Gómez, J. C. Mondéjar, and J. L. 
Martínez, \"A tabpfn-based intrusion detection system for the industrial internet of things,\" The Journal of Supercomputing, vol. 80, no. 14, pp. 20080-20117, 2024. 18", + "[282] A. Margeloiu, A. Bazaga, N. Simidjievski, P. Lio, and M. Jamnik, \"Tabmda: Tabular manifold data augmentation for any classifier using transformers with in-context subsetting,\" CoRR, vol. abs/2406.01805, 2024. 18", + "[283] S. B. Hoo, S. Müller, D. Salinas, and F. Hutter, \"The tabular foundation model tabpfn outperforms specialized time series forecasting models based on simple features,\" CoRR, vol. abs/2501.02945, 2025. 18", + "[284] F. den Breejen, S. Bae, S. Cha, and S.-Y. Yun, \"Fine-tuned in-context learning transformers are excellent tabular data classifiers,\" CoRR, vol. abs/2405.13396v2, 2025. 18, 19", + "[285] Y. Wu and D. L. Bergman, \"Zero-shot meta-learning for tabular prediction tasks with adversarially pre-trained transformer,\" CoRR, vol. abs/2502.04573, 2025. 18", + "[286] J. Qu, D. Holzmüller, G. Varoquaux, and M. L. Morvan, \"Tabicl: A tabular foundation model for in-context learning on large data,\" CoRR, vol. abs/2502.05564, 2025. 18, 19" + ], + "bbox": [ + 506, + 54, + 923, + 941 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 75, + 32, + 410, + 44 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[287] B. Feuer, C. Hegde, and N. Cohen, \"Scaling tabpfn: Sketching and feature selection for tabular prior-data fitted networks,\" CoRR, vol. abs/2311.10609, 2023. 18", + "[288] J. Ma, V. Thomas, G. Yu, and A. L. Caterini, \"In-context data distillation with tabpfn,\" CoRR, vol. abs/2402.06971, 2024. 18", + "[289] B. Feuer, R. T. Schirrmeister, V. Cherepanova, C. Hegde, F. Hutter, M. Goldblum, N. Cohen, and C. 
White, \"Tunetables: Context optimization for scalable prior-data fitted networks,\" in NeurIPS, 2024, pp. 83430-83464. 18, 19", + "[290] D. Xu, O. Cirit, R. Asadi, Y. Sun, and W. Wang, \"Mixture of in-context prompters for tabular pfns,\" CoRR, vol. abs/2405.16156, 2024. 19", + "[291] M. Koshil, T. Nagler, M. Feurer, and K. Eggensperger, \"Towards localization via data embedding for tabPFN,\" in NeurIPS Workshop, 2024. 19", + "[292] Y. Zeng, W. Kang, and A. C. Mueller, \"Tabflex: Scaling tabular learning to millions with linear attention,\" in NeurIPS Workshop, 2024. 19, 21", + "[293] S. K. Baur and S. Kim, “Exploration of autoregressive models for in-context learning on tabular data,” in NeurIPS Workshop, 2024. 19", + "[294] M. Arbel, D. Salinas, and F. Hutter, \"Equitabpfn: A target-permutation equivariant prior fitted networks,\" CoRR, vol. abs/2502.06684, 2025. 19", + "[295] Y. Sun, X. Wen, S. Zheng, X. Jia, and J. Bian, \"Scaling generative tabular learning for large language models,\" in NeurIPS Workshop, 2024. 19", + "[296] Y. Freund, R. E. Schapire et al., \"Experiments with a new boosting algorithm,\" in ICML, vol. 96, 1996, pp. 148-156. 19", + "[297] Z.-H. Zhou, Ensemble methods: foundations and algorithms. CRC press, 2012. 19", + "[298] Y. Wen, D. Tran, and J. Ba, \"Batchsemble: an alternative approach to efficient ensemble and lifelong learning,\" in ICLR, 2020. 19", + "[299] M. Jayawardhana, Renbo, S. Dooley, V. Cherepanova, A. G. Wilson, F. Hutter, C. White, T. Goldstein, and M. Goldblum, \"Transformers boost the performance of decision trees on tabular data across sample sizes,\" CoRR, vol. abs/2502.02672v2, 2025. 19", + "[300] R. Caruana, A. Munson, and A. Niculescu-Mizil, “Getting the most out of ensemble selection,” in ICDM, 2006, pp. 828-833. 20", + "[301] Y. Wang, B. Jiang, Y. Guo, Q. Gan, D. Wipf, X. Huang, and X. Qiu, \"Prior-fitted networks scale to larger datasets when treated as weak learners,\" CoRR, vol. abs/2503.01256, 2025. 
20", + "[302] J. C. Gower, \"A general coefficient of similarity and some of its properties,\" Biometrics, pp. 857-871, 1971. 20", + "[303] F. T. Liu, K. M. Ting, and Z.-H. Zhou, \"Isolation forest,\" in ICDM, 2008, pp. 413-422. 20", + "[304] M. M. Breunig, H.-P. Kriegel, R. T. Ng, and J. Sander, “Lof: identifying density-based local outliers,” in SIGMOD, 2000, pp. 93-104. 20", + "[305] T. Shenkar and L. Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 20", + "[306] A. Li, Y. Zhao, C. Qiu, M. Kloft, P. Smyth, M. Rudolph, and S. Mandt, \"Anomaly detection of tabular data using llms,\" CoRR, vol. abs/2406.16308, 2024. 20", + "[307] C. Lee, J. Kim, and N. Park, \"Codi: Co-evolving contrastive diffusion models for mixed-type tabular synthesis,\" in ICML, 2023, pp. 18940-18956. 20", + "[308] R. Tu, Z. Senane, L. Cao, C. Zhang, H. Kjellström, and G. E. Henter, \"Causality for tabular data synthesis: A high-order structure causal benchmark framework,\" CoRR, vol. abs/2406.08311, 2024. 20", + "[309] R. Feinman and B. M. Lake, \"Generating new concepts with hybrid neuro-symbolic models,\" CoRR, vol. abs/2003.08978, 2020. 20", + "[310] T. Hastie, “The elements of statistical learning: data mining, inference, and prediction,” 2009. 20", + "[311] B. M. Greenwell et al., \"pdp: An r package for constructing partial dependence plots,\" R Journal, vol. 9, no. 1, p. 421, 2017. 20", + "[312] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, C.-S. Chen, and D. T.-H. Chang, \"Dofen: Deep oblivious forest ensemble,\" in NeurIPS, 2024, pp. 44624-44677. 20", + "[313] B. Sun and K. Saenko, \"Deep CORAL: correlation alignment for deep domain adaptation,\" in ECCV Workshops (3), 2016, pp. 443-450. 20", + "[314] C. Kim, T. Kim, S. Woo, J. Y. Yang, and E. 
Yang, \"Adaptable: Test-time adaptation for tabular data via shift-aware uncertainty cali" + ], + "bbox": [ + 76, + 55, + 491, + 941 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "brator and label distribution handler,\" CoRR, vol. abs/2407.10784, 2024. 20", + "[315] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. S. Lempitsky, \"Domain-adversarial training of neural networks,\" J. Mach. Learn. Res., vol. 17, pp. 59:1-59:35, 2016. 20", + "[316] S. Sagawa, P. W. Koh, T. B. Hashimoto, and P. Liang, \"Distribu-tionally robust neural networks,\" in ICLR, 2020. 20", + "[317] D. Levy, Y. Carmon, J. C. Duchi, and A. Sidford, \"Large-scale methods for distributionally robust optimization,\" in NeurIPS, 2020, pp. 8847-8860. 20", + "[318] J. Zhang, A. K. Menon, A. Veit, S. Bhojanapalli, S. Kumar, and S. Sra, \"Coping with label shift via distributionally robust optimisation,\" in ICLR, 2021. 20", + "[319] H.-R. Cai and H.-J. Ye, \"Understanding the limits of deep tabular methods with temporal shift,\" CoRR, vol. abs/2502.20260, 2025. 21", + "[320] W. Huang, \"Multimodal contrastive learning and tabular attention for automated alzheimers disease prediction,\" in ICCV (Workshops), 2023, pp. 2465-2474. 21", + "[321] S. Du, S. Zheng, Y. Wang, W. Bai, D. P. O'Regan, and C. Qin, \"Tip: Tabular-image pre-training for multimodal classification with incomplete data,\" in ECCV, 2024, pp. 478-496. 21", + "[322] A. Gilani, S. R. Qasim, I. Malik, and F. Shafait, \"Table detection using deep learning,\" in ICDAR, 2017, pp. 771-776. 21", + "[323] M. Li, L. Cui, S. Huang, F. Wei, M. Zhou, and Z. Li, \"Tablebank: Table benchmark for image-based table detection and recognition,\" in LREC, 2020, pp. 1918-1925. 21", + "[324] S. Schreiber, S. Agne, I. Wolf, A. Dengel, and S. Ahmed, \"Deepdesrt: Deep learning for detection and structure recognition of tables in document images,\" in ICDAR, 2017, pp. 
1162-1167. 21", + "[325] M. s. Kasem, A. Abdallah, A. Berendeyev, E. Elkady, M. Mahmoud, M. Abdalla, M. Hamada, S. Vascon, D. Nurseitov, and I. Taj-eddin, \"Deep learning for table detection and structure recognition: A survey,\" ACM Computing Surveys, vol. 56, no. 12, pp. 1-41, 2024. 21", + "[326] W. Chen, M.-W. Chang, E. Schlinger, W. Wang, and W. W. Cohen, \"Open question answering over tables and text,\" CoRR, vol. abs/2010.10439, 2020. 21", + "[327] A. Talmor, O. Yoran, A. Catav, D. Lahav, Y. Wang, A. Asai, G. Ilharco, H. Hajishirzi, and J. Berant, \"Multimodalqa: Complex question answering over text, tables and images,\" CoRR, vol. abs/2104.06039, 2021. 21", + "[328] S. Appalaraju, B. Jasani, B. U. Kota, Y. Xie, and R. Manmatha, \"Docformer: End-to-end transformer for document understanding,\" in ICCV, 2021, pp. 993-1003. 21", + "[329] C. Da, P. Wang, and C. Yao, \"Multi-granularity prediction with learnable fusion for scene text recognition,\" CoRR, vol. abs/2307.13244, 2023. 21", + "[330] Z. Gu, C. Meng, K. Wang, J. Lan, W. Wang, M. Gu, and L. Zhang, \"Xylayoutlm: Towards layout-aware multimodal networks for visually-rich document understanding,\" in CVPR, 2022, pp. 4583-4592. 21", + "[331] A. Nassar, N. Livathinos, M. Lysak, and P. Staar, \"Tableformer: Table structure understanding with transformers,\" in CVPR, 2022, pp. 4614-4623. 21", + "[332] G. Kim, T. Hong, M. Yim, J. Park, J. Yim, W. Hwang, S. Yun, D. Han, and S. Park, \"Donut: Document understanding transformer withoutOCR,\" CoRR, vol. abs/2111.15664, 2021. 21", + "[333] H. Feng, Z. Wang, J. Tang, J. Lu, W. Zhou, H. Li, and C. Huang, \"Unidoc: A universal large multimodal model for simultaneous text detection, recognition, spotting and understanding,\" CoRR, vol. abs/2308.11592, 2023. 21", + "[334] J. Wan, S. Song, W. Yu, Y. Liu, W. Cheng, F. Huang, X. Bai, C. Yao, and Z. Yang, \"Omniparser: A unified framework for text spotting key information extraction and table recognition,\" in CVPR, 2024, pp. 
15641-15653. 21", + "[335] W. Zhao, H. Feng, Q. Liu, J. Tang, S. Wei, B. Wu, L. Liao, Y. Ye, H. Liu, W. Zhou et al., \"Tabpedia: Towards comprehensive visual table understanding with concept synergy,\" CoRR, vol. abs/2406.01326, 2024. 21", + "[336] Z. Li, B. Yang, Q. Liu, Z. Ma, S. Zhang, J. Yang, Y. Sun, Y. Liu, and X. Bai, \"Monkey: Image resolution and text label are important things for large multi-modal models,\" in CVPR, 2024, pp. 26763-26773. 21" + ], + "bbox": [ + 506, + 55, + 926, + 940 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 75, + 32, + 410, + 44 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[337] Y. Liu, B. Yang, Q. Liu, Z. Li, Z. Ma, S. Zhang, and X. Bai, \"Textmonkey: AnOCR-free large multimodal model for understanding document,\" CoRR, vol. abs/2403.04473, 2024. 21", + "[338] J. Ye, A. Hu, H. Xu, Q. Ye, M. Yan, Y. Dan, C. Zhao, G. Xu, C. Li, J. Tian et al., \"mplug-docowl: Modularized multimodal large language model for document understanding,\" CoRR, vol. abs/2307.02499, 2023. 21", + "[339] N. Deng, Z. Sun, R. He, A. Sikka, Y. Chen, L. Ma, Y. Zhang, and R. Mihalcea, \"Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data,\" CoRR, vol. abs/2402.12424, 2024. 21", + "[340] Z.-H. Zhou, \"Open-environment machine learning,\" National Science Review, vol. 9, no. 8, p. nwac123, 07 2022. 21", + "[341] W. Ren, X. Li, H. Chen, V. Rakesh, Z. Wang, M. Das, and V. G. Honavar, \"Tablog: Test-time adaptation for tabular data using logic rules,\" in ICML, 2024, pp. 42417-42427. 21", + "[342] J. Kaplan, S. McCandlish, T. Henighan, T. B. Brown, B. 
Chess," + ], + "bbox": [ + 75, + 54, + 493, + 251 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "R. Child, S. Gray, A. Radford, J. Wu, and D. Amodei, \"Scaling laws for neural language models,\" CoRR, vol. abs/2001.08361, 2020. 21", + "[343] Z.-H. Zhou, \"Learnware: on the future of machine learning,\" Frontiers of Computer Science, vol. 10, no. 4, pp. 589-590, 2016. 21", + "[344] Z.-H. Zhou and Z.-H. Tan, \"Learnware: small models do big,\" Science China Information Science, vol. 67, no. 1, 2024. 21", + "[345] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 21", + "[346] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 06 2024. 22", + "[347] W. Liang, Y. Zhang, Y. Kwon, S. Yeung, and J. Y. Zou, \"Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning,\" in NeurIPS, 2022. 22" + ], + "bbox": [ + 506, + 54, + 924, + 250 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX", + "bbox": [ + 73, + 32, + 410, + 44 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 28 + } +] \ No newline at end of file diff --git a/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_model.json b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b58dc90e3cc3698a634033b1874b11d92bf2a251 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_model.json @@ -0,0 +1,8024 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "1" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.16109v1 [cs.LG] 17 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.066, + 0.868, + 0.137 + ], + "angle": 0, + "content": "Representation Learning for Tabular Data: A Comprehensive Survey" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.152, + 0.761, + 0.169 + ], + "angle": 0, + "content": "Jun-Peng Jiang, Si-Yang Liu, Hao-Run Cai, Qile Zhou, Han-Jia Ye" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.192, + 0.893, + 0.39 + ], + "angle": 0, + "content": "Abstract—Tabular data, structured as rows and columns, is among the most prevalent data types in machine learning classification and regression applications. Models for learning from tabular data have continuously evolved, with Deep Neural Networks (DNNs) recently demonstrating promising results through their capability of representation learning. In this survey, we systematically introduce the field of tabular representation learning, covering the background, challenges, and benchmarks, along with the pros and cons of using DNNs. We organize existing methods into three main categories according to their generalization capabilities: specialized, transferable, and general models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. We introduce a hierarchical taxonomy for specialized models based on the key aspects of tabular data—features, samples, and objectives—and delve into detailed strategies for obtaining high-quality feature- and sample-level representations. 
Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks, leveraging knowledge acquired from homogeneous or heterogeneous sources, or even cross-modalities such as vision and language. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning. We group these general models based on the strategies used to adapt across heterogeneous datasets. Additionally, we explore ensemble methods, which integrate the strengths of multiple tabular models. Finally, we discuss representative extensions of tabular learning, including open-environment tabular machine learning, multimodal learning with tabular data, and tabular understanding tasks. More information can be found in the following repository: https://github.com/LAMDA-Tabular/Tabular-Survey." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.401, + 0.88, + 0.415 + ], + "angle": 0, + "content": "Index Terms—Tabular Data, Representation Learning, Deep Tabular Learning, Tabular Machine Learning, Tabular Foundation Model" + }, + { + "type": "image", + "bbox": [ + 0.491, + 0.422, + 0.506, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.475, + 0.23, + 0.489 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.495, + 0.495, + 0.627 + ], + "angle": 0, + "content": "Tabular data, characterized by structured rows and columns, is one of the most prevalent data formats in real-world machine learning applications, spanning diverse domains such as finance [1], healthcare [2], education [3], recommendation systems [4], and scientific research. In particular, AI for scientific research (AI4science) has increasingly relied on tabular data, as numerous prominent datasets—such as those from genomics [5], chemistry [6], and climate science [7], [8]—naturally adopt tabular forms." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.628, + 0.494, + 0.862 + ], + "angle": 0, + "content": "Tabular data inherently organizes information in a structured, table-like format. In this survey, we focus primarily on supervised tabular machine learning tasks, specifically classification and regression. Beyond their structured organization, tabular datasets frequently include heterogeneous attributes [9], encompassing numerical, categorical, or mixed data types that may be either dense or sparse. Additionally, many tabular datasets present quality challenges, such as noisy measurements, missing values, outliers, inaccuracies [10], and privacy constraints [11], all of which complicate the modeling process. The most common supervised tabular tasks are classification and regression, where the goal is to learn mappings from training data to discrete or continuous targets, respectively. As illustrated in Figure 1, each row represents an instance (with its corresponding label), while each column corresponds to a specific attribute or feature [12]." + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.476, + 0.918, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.654, + 0.925, + 0.743 + ], + "angle": 0, + "content": "Figure 1: A brief introduction to tabular data and associated learning tasks. Each row represents an instance and each column corresponds to a specific attribute or feature, which can be numerical or categorical. The most common tabular machine learning tasks are classification and regression as shown in the right side of the figure." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.752, + 0.925, + 0.797 + ], + "angle": 0, + "content": "Ideally, learned mappings should generalize effectively, accurately predicting outcomes for new instances drawn from the same underlying distribution." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.797, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Machine learning methods for tabular data have evolved significantly over the years [13], [14], [15], [16]. Recently, the rise of deep learning has profoundly impacted domains like computer vision [17] and natural language processing [18], where Deep Neural Networks (DNNs) extract semantic representations directly from raw inputs [19], [20], [21]. These learned representations have not only improved generalization but have also facilitated knowledge transfer across related tasks [22]. The flexibility of DNNs in modeling complex feature interactions and learning rich hierarchical" + }, + { + "type": "page_footnote", + "bbox": [ + 0.073, + 0.877, + 0.493, + 0.937 + ], + "angle": 0, + "content": "- J.-P. Jiang, S.-Y Liu, H.-R Cai, Q. Zhou, and H.-J. Ye are with School of Artificial Intelligence, Nanjing University, and National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China. E-mail: {jiangjp.liusy,zhouql.yehj}@lamda.nju.edu.cn, caihr@smail.nju.edu.cn" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "2" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.059, + 0.904, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.371, + 0.925, + 0.446 + ], + "angle": 0, + "content": "Figure 2: We organize existing tabular classification/regression methods into three categories according to their generalization capabilities: specialized (left), transferable (middle), and general (right) models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. 
Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.453, + 0.492, + 0.482 + ], + "angle": 0, + "content": "structures has inspired significant interest in adapting deep learning techniques to tabular data." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.485, + 0.493, + 0.734 + ], + "angle": 0, + "content": "Indeed, DNNs were applied to tabular data decades ago, initially targeting dimensionality reduction and visualization tasks [23], [24], [25], [26], yet they typically struggled to match tree-based methods on standard classification and regression problems. Later advances in DNNs have led to significant improvements across various tabular-related applications, such as click-through rate prediction [27], [28], anomaly detection [29], recommendation systems [30], and time series forecasting [31], [32]. Modern deep learning approaches, benefiting from better-designed architectures, optimized training strategies, high-quality representations, have revitalized DNN performance on tabular data, often rivaling or surpassing traditional tree-based models [33], [34], [35]. Given the wide variety of approaches emerging in deep tabular modeling, a systematic overview that revisits critical factors and current methodologies in representation learning for tabular data has become increasingly necessary." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.736, + 0.492, + 0.882 + ], + "angle": 0, + "content": "This survey begins by introducing the background of tabular data learning, highlighting the challenges involved and critically examining the advantages and limitations of utilizing DNNs compared to classical—particularly tree-based—methods [36], [37], [38], [39]. 
Given the observed instability of method performance across different tabular datasets, we also discuss comprehensive strategies for dataset collection, evaluation, and analysis, aiming to establish robust criteria for aggregating performance metrics across multiple datasets [40], [41], [42], [43]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.884, + 0.492, + 0.945 + ], + "angle": 0, + "content": "We broadly categorize deep tabular methods into three types: specialized methods, transferable methods, and general methods, distinguished by the scope of datasets on which they are trained and deployed, as well as their corresponding" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.453, + 0.927, + 0.802 + ], + "angle": 0, + "content": "generalization capabilities (illustrated in Figure 2). Specialized tabular methods align closely with classical supervised models, typically trained and evaluated on data drawn from the same distribution. In contrast, transferable methods leverage knowledge from models pre-trained on one or multiple source datasets, subsequently fine-tuning these models on target datasets; the primary challenge here lies in addressing the heterogeneity between pre-trained sources and target tasks. The recently proposed general tabular methods—motivated by the remarkable \"zero-shot\" generalization abilities demonstrated by large language models (LLMs)—exhibit exceptional versatility. These general models can directly apply their learned representations to downstream tabular datasets without additional fine-tuning, achieving robust generalization due to advanced pre-training strategies. Although the generalization ability tends to increase from specialized to general models, it does not imply that specialized or transferable methods are less valuable; specialized models remain superior on large-scale datasets, and fine-tuning general models can further improve their predictive performance. 
Additionally, the first two types of methods provide foundational insights and valuable components that contribute significantly to advancements in general tabular models." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.812, + 0.925, + 0.945 + ], + "angle": 0, + "content": "For specialized methods, numerous designs have been proposed from diverse perspectives, and previous papers have often categorized these methods based primarily on their architectural characteristics or behaviors. Existing taxonomies [44], for example, group specialized methods into feature-preprocessing-based [33], [45], data-augmentation-based [46], [47], [48], [49], MLP variants [50], [34], specialized DNN architectures [51], [52], [53], [54], [55], [56], [57], [58], tree-mimic approaches [59], [60], [61], token-based tech" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.053, + 0.493, + 0.242 + ], + "angle": 0, + "content": "niques [62], [63], [33], [64], [65], regularization-driven methods [66], [67], and neighborhood-based strategies [68], [69], [35]. However, such categorizations can appear scattered, making it difficult to connect the core ideas between methods placed in distinct groups. In contrast, this survey introduces a hierarchical taxonomy based on the key aspects of tabular data—features, samples, and objectives—providing a cohesive organizational framework. Our approach emphasizes detailed strategies for obtaining high-quality representations at both feature- and sample-levels. This unified perspective helps bridge core ideas across diverse methods, facilitating clearer comparative discussions and potentially guiding the design of future, more advanced tabular models." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.244, + 0.493, + 0.622 + ], + "angle": 0, + "content": "Instead of training a model from scratch on a single tabular dataset, transferable models leverage knowledge encoded in a pre-trained model from another dataset, which can significantly enhance the training process, especially when data or computational resources for the target task are limited. A major challenge in transferring knowledge across tabular tasks lies in the inherent heterogeneity between the source and target datasets, particularly differences in their feature and label spaces. In this survey, we adopt a broad perspective on transferable tabular models, categorizing methods based on the sources of their pre-trained knowledge. Specifically, we discuss models pre-trained on homogeneous tabular domains, such as self-supervised methods with additional pre-training steps on the target dataset itself [70], [71]; models pre-trained across heterogeneous tabular domains [72], [73], [64]; and methods transferring knowledge from other modalities, such as vision-based pre-trained models [74], [75], [76]. Additionally, since incorporating attribute semantics (when available) is a common strategy for bridging heterogeneous attribute spaces across tabular datasets [77], [78], [79], we also explore approaches leveraging language models in the final category. In particular, we further organize these language model-based strategies according to the methods they use to extract knowledge and the types of language models involved—ranging from small-scale language models to Large Language Models (LLMs) [80], [81], [82], [83]." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.622, + 0.493, + 0.927 + ], + "angle": 0, + "content": "Inspired by recent advancements in foundation models from vision and language domains [84], [85], general models—also known as tabular foundation models—expand the concept of transferable tabular models by enabling direct application to downstream tasks without additional fine-tuning. This capability, commonly referred to as the model's \"zero-shot\" ability, significantly enhances the model's usability across diverse tabular datasets. In contrast to transferable models, which primarily focus on bridging knowledge gaps between source and target datasets, general models aim to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. We categorize these general models based on the strategies used to achieve adaptiveness across diverse tabular tasks, specifically examining adaptations from both data-centric [86] and model-centric perspectives [87], [88]. Furthermore, we discuss critical branches of general tabular models in detail: the TabPFN variants leveraging in-context learning [89], [90], [91], and methods utilizing attribute and task semantics to unify heterogeneous tasks within a common representation framework [92], [93], [94]." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.928, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Additionally, ensemble methods [95], [96], [91] are in" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.215 + ], + "angle": 0, + "content": "produced, which improve the generalization ability based on the strengths of multiple tabular models. 
Finally, we briefly overview other relevant extensions of tabular learning, including clustering [97], [98], anomaly detection [99], [100], [101], data generation and imputation [102], [103], [104], interpretability [63], [105], [61], multimodal learning [106], [107], open-environment tabular machine learning [108], [109], [110], [111], and tabular understanding [112], [113]. By summarizing the state of the field and identifying open challenges, we aim to guide future research and applications in tabular data representation learning."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.505,
+ 0.234,
+ 0.655,
+ 0.248
+ ],
+ "angle": 0,
+ "content": "2 BACKGROUND"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.504,
+ 0.253,
+ 0.924,
+ 0.312
+ ],
+ "angle": 0,
+ "content": "This section presents the (supervised) tabular machine learning task, including the notation of tabular data learning, the history of tabular data, the challenges of learning from tabular data, evaluation metrics, and tabular benchmarks."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.505,
+ 0.33,
+ 0.746,
+ 0.345
+ ],
+ "angle": 0,
+ "content": "2.1 Learning with Tabular Data"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.504,
+ 0.348,
+ 0.924,
+ 0.525
+ ],
+ "angle": 0,
+ "content": "A supervised tabular dataset is formatted as \\(N\\) examples and \\(d\\) features/attributes corresponding to \\(N\\) rows and \\(d\\) columns in the table. An instance \\(\\pmb{x}_i\\in \\mathbb{R}^d\\) is depicted by its \\(d\\) feature values. Assume \\(x_{i,j}\\) as the \\(j\\)-th feature of instance \\(\\pmb{x}_i\\), it could be a numerical (continuous) one \\(x_{i,j}^{\\mathrm{num}}\\in \\mathbb{R}\\), like the temperature of a region or the density of the object. \\(x_{i,j}\\) can also be a categorical (discrete) value \\(x_{i,j}^{\\mathrm{cat}}\\), like one of multiple colors, the location of a person, or even some textual descriptions of the instance. 
Each instance is associated with a label \\(y_i\\), where \\(y_i\\in \\{1, - 1\\}\\) in a binary classification task, \\(y_i\\in [C] = \\{1,\\dots ,C\\}\\) in a multi-class classification task, and \\(y_i\\in \\mathbb{R}\\) in a regression task." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.526, + 0.925, + 0.63 + ], + "angle": 0, + "content": "Remark 1. Ordinal regression [114], [115], also called ordinal classification, is a type of regression analysis used to predict an ordinal variable. It can be considered an intermediate problem between regression and classification. However, this survey primarily focuses on standard classification and regression tasks and does not specifically discuss ordinal regression." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.634, + 0.924, + 0.694 + ], + "angle": 0, + "content": "Given a tabular dataset \\(\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N\\), we aim to learn a mapping \\(f\\) on \\(\\mathcal{D}\\) that maps \\(x_i\\) to its label \\(y_i\\). In other words, the model predicts \\(x_i\\) with \\(\\hat{y}_i = f(x_i)\\). The general objective learning \\(f\\) follows the structural risk minimization:" + }, + { + "type": "equation", + "bbox": [ + 0.574, + 0.7, + 0.924, + 0.733 + ], + "angle": 0, + "content": "\\[\n\\min _ {f} \\sum_ {\\left(\\boldsymbol {x} _ {i}, y _ {i}\\right) \\in \\mathcal {D}} \\ell (y, \\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}\\right)) + \\Omega (f). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.738, + 0.923, + 0.826 + ], + "angle": 0, + "content": "\\(\\ell (\\cdot ,\\cdot)\\) measures the discrepancy between the predicted label \\(\\hat{y}_i\\) and the true label \\(y_{i},e.g.\\), cross-entropy in classification and mean square error in regression. \\(\\Omega (\\cdot)\\) is the regularization on the model, which restricts the complexity of \\(f\\). 
We expect the learned \\(f\\) is able to extend its ability to unseen instances sampled from the same distribution as \\(\\mathcal{D}\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.826, + 0.924, + 0.899 + ], + "angle": 0, + "content": "Tabular methods differ in their strategies to implement \\( f \\). The \"dummy\" approach makes predictions based on training labels \\( \\{y_i\\}_{i=1}^N \\) directly, which outputs the major class in the training set for classification and the average of all labels for regression, respectively." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.925, + 0.943 + ], + "angle": 0, + "content": "In a \\(C\\)-class classification task, classical parametric methods implement \\(f\\) with a linear mapping, i.e., \\(f(\\pmb{x}_i) = \\pmb{W}^\\top \\pmb{x}_i + \\pmb{b}\\), where the classifier \\(\\pmb{W} \\in \\mathbb{R}^{d \\times C}\\) and \\(\\pmb{b} \\in \\mathbb{R}^C\\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "4" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.185 + ], + "angle": 0, + "content": "is the bias. With different loss functions, we can implement Logistic Regression, SVM, or even AdaBoost. In contrast, non-parametric methods implement the prediction via \\( f(\\pmb{x}_i) = f(\\pmb{x}_i, \\mathcal{D}) \\), depending on the whole training set. For example, KNN searches neighbors in the training set \\( \\mathcal{D} \\) with the \\( K \\) smallest distance w.r.t. \\( \\pmb{x}_i \\). KNN can be viewed as a specific label smoother, with a dynamic local region for every instance. [116] links KNN and Random Forest from their ways of smoothing training labels in their predictions." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.186, + 0.493, + 0.304 + ], + "angle": 0, + "content": "Deep tabular methods implement \\( f \\) with a deep neural network, e.g. Most deep learning models could be decomposed into two parts, i.e., \\( f(\\pmb{x}_i) = \\pmb{W}^\\top \\phi(\\pmb{x}_i) + \\pmb{b} \\). Similar to the linear model, \\( \\pmb{W} \\) and \\( \\pmb{b} \\) are the components of linear classifier, with \\( \\pmb{W} \\in \\mathbb{R}^{d' \\times C} \\). \\( \\phi \\) maps the input vector \\( \\pmb{x}_i \\) into the \\( d' \\) dimension space, which extracts semantic embeddings for the given tabular input. \\( \\phi \\) could be implemented with MLP or residual network." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.324, + 0.286, + 0.339 + ], + "angle": 0, + "content": "2.2 History of Tabular Data" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.344, + 0.497, + 0.519 + ], + "angle": 0, + "content": "Historically, classical machine learning tasks were predominantly formulated with tabular data, or datasets readily transformed into a tabular representation without explicitly designating them as \"tabular.\" In early literature, the term \"tabular\" typically referred to tables within relational databases [117], CSV files on the web [118], or tables in documents [119]. Relevant tasks included table extraction [120], parsing [121], understanding [122], and discovering association rules [123]. With the expansion of machine learning applications into other modalities such as images, texts, audio, and video, the classical vector-based data representations have come to be explicitly termed \"tabular data.\"" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.52, + 0.493, + 0.71 + ], + "angle": 0, + "content": "Early statistical approaches such as linear regression, logistic regression, linear discriminant analysis, and K-Nearest Neighbors (KNN) predate artificial intelligence. 
Classical learning methods further expanded across various paradigms, including decision trees [124], [125], multi-layer perceptrons (MLPs), support vector machines (SVMs), and nearest centroid classifiers [5], [14]. Ensemble methods enhanced predictive performance by aggregating outputs from multiple base learners [126], [127]. More recently, gradient boosting frameworks [128], [129], such as XGBoost [130], LightGBM [131], and CatBoost [132], have become prominent due to their effectiveness and efficiency in tabular data applications and competitions [133], [134]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.71, + 0.496, + 0.945 + ], + "angle": 0, + "content": "With the development of deep learning, DNNs were applied to tabular classification and regression tasks decades ago, utilizing architectures such as stacked Restricted Boltzmann Machines and denoising autoencoders [135], [136], [137]. Early representation learning efforts primarily focused on dimensionality reduction and data visualization tasks [23], [24], [25], [26], yet these models struggled to surpass traditional tree-based methods in terms of generalization. However, advancements in neural network architectures and representation learning strategies have recently led to promising results in related tabular domains, including click-through rate prediction [27], [28], anomaly detection [138], [29], recommendation systems [139], [30], and time series forecasting [31], [140], [32], [141]. Innovations such as convolutional layers and learnable feature embeddings have improved the ability of deep models to capture high-order" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.127 + ], + "angle": 0, + "content": "attribute relationships [142], [143]. 
While early deep tabular methods lagged behind ensemble tree-based models, recent techniques have demonstrated competitive or superior performance [33], [34], [35], affirming deep representation learning as a promising direction for tabular data modeling." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.127, + 0.927, + 0.26 + ], + "angle": 0, + "content": "While several survey papers have been published [9], [144], the field of tabular data has witnessed remarkable progress over the past two years. On one hand, the emergence of new specialized methods has introduced significant shifts in the landscape, motivating the need for our comprehensive taxonomy. On the other hand, the rise of transferable and general approaches has greatly enhanced the generality and applicability of tabular data modeling, which has been overlooked in previous works." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.281, + 0.855, + 0.295 + ], + "angle": 0, + "content": "2.3 Challenges of Learning from Tabular Data" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.3, + 0.924, + 0.343 + ], + "angle": 0, + "content": "Different from other types of data sources, e.g., images and texts, there exist several challenges dealing with tabular datasets due to their characteristics." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.344, + 0.925, + 0.49 + ], + "angle": 0, + "content": "Heterogeneity of Features. Unlike continuous image data or token-based textual data, tabular datasets often contain both numerical and categorical attributes, each requiring distinct handling methods [9], [145]. Numerical features frequently exhibit varying ranges and distributions, necessitating normalization or scaling. Categorical features differ in cardinality and semantic interpretation, requiring encoding methods like one-hot vectors or embeddings. Consequently, tabular models must carefully handle these mixed data types to preserve the usability of each feature." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.49, + 0.925, + 0.651 + ], + "angle": 0, + "content": "Lack of Spatial Relationships. Tabular data inherently lacks spatial or sequential relationships that are naturally found in other modalities [74], [50]. The order of columns has no semantic or spatial meaning, making tabular data permutation-invariant regarding features. Moreover, standard tabular machine learning assumes rows are independently and identically distributed (i.i.d.), further eliminating temporal or sequential correlations present in data such as video or time series. This absence of inherent spatial or sequential structure challenges deep learning architectures traditionally designed to exploit such dependencies." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.651, + 0.925, + 0.797 + ], + "angle": 0, + "content": "Low-quality and Missing Data. Compared to image or text data, where contextual or spatial redundancies help manage missing or corrupted values, tabular data is more vulnerable to incomplete or erroneous entries [146], [147]. Missing values in tabular datasets can introduce significant biases and degrade prediction quality. Additionally, noisy or incorrect values can considerably affect model reliability. Data preprocessing steps, including data cleaning and imputation, become crucial to maintaining accuracy and robustness in tabular machine learning." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.797, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Importance of Feature Engineering. Effective tabular models heavily depend on the quality of their input features [45], [148]. Unlike image or textual data, where DNNs inherently learn feature representations from raw data, tabular methods often require domain-specific knowledge and meticulous manual feature engineering. 
Identifying and modeling complex, nonlinear interactions among tabular features frequently demands sophisticated transformations and expert insight, significantly impacting the predictive performance of models [149]."
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [
+ 0.075,
+ 0.032,
+ 0.415,
+ 0.045
+ ],
+ "angle": 0,
+ "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX"
+ },
+ {
+ "type": "page_number",
+ "bbox": [
+ 0.913,
+ 0.034,
+ 0.923,
+ 0.044
+ ],
+ "angle": 0,
+ "content": "5"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.072,
+ 0.054,
+ 0.493,
+ 0.244
+ ],
+ "angle": 0,
+ "content": "Class Imbalance. Tabular datasets frequently exhibit imbalanced label distributions, especially in classification tasks, where certain categories are underrepresented [150], [151]. Class imbalance complicates model learning, leading to biased outcomes toward majority classes and poor performance on minority classes. Specialized methods such as oversampling, undersampling, or tailored loss functions (e.g., focal loss [152]) are required to address this imbalance effectively. Evaluation criteria like the AUC or F1-score further help assess model quality in imbalanced settings. Recent research highlights differences between deep and classical models in handling imbalance, emphasizing the need for careful consideration [153], [154], [155], [41]."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.074,
+ 0.248,
+ 0.493,
+ 0.41
+ ],
+ "angle": 0,
+ "content": "Remark 2. Class imbalance has long been a known issue in the tabular domain, even before the rise of deep learning [156], and methods such as SMOTE [157], [158] can easily be extended to deep learning methods during preprocessing. However, current deep tabular methods primarily assume that the training and testing data come from the same distribution, even in cases involving class imbalance. In addition, some class imbalance methods in the visual domain can be readily extended to tabular data learning [159], [160]. 
Therefore, we do not delve into class imbalance in this survey." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.418, + 0.493, + 0.711 + ], + "angle": 0, + "content": "Scalability to Large Datasets. Tabular datasets can become large-scale and high-dimensional, presenting computational and generalization challenges [161]. With increasing dimensionality, the risk of overfitting increases, especially when the number of features significantly surpasses the number of samples. Consequently, efficient training algorithms, memory management strategies, and sufficient computational resources become essential. Effectively scaling tabular models to handle large datasets while maintaining generalization ability remains a challenging but critical research area [162]. Model Selection and Hyperparameter Tuning. Tabular models are particularly sensitive to hyperparameter settings [163], [164]. Selecting an appropriate model architecture and tuning hyperparameters, such as learning rate, layer depth, or number of trees, can be computationally expensive and time-consuming. Despite the advancement of automated machine learning (AutoML) techniques [165], [166], [167], efficiently identifying optimal configurations for deep tabular methods under practical constraints remains challenging and critical for achieving high predictive performance." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.711, + 0.493, + 0.858 + ], + "angle": 0, + "content": "Domain-Specific Constraints. Certain application domains, such as healthcare or finance, impose additional regulatory or ethical requirements on model development [168]. For example, healthcare applications must comply with privacy standards like HIPAA [169] and provide explainability to clinicians. Financial models similarly must adhere to fairness regulations and industry standards. 
These constraints can restrict algorithm selection, necessitate interpretable predictions, and require additional validation, explainability, and auditability procedures [170], [171], [172]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.878, + 0.345, + 0.893 + ], + "angle": 0, + "content": "2.4 Evaluation of a Tabular Method" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.899, + 0.493, + 0.945 + ], + "angle": 0, + "content": "We present the evaluation of tabular methods, ranging from traditional to modern, to provide a comprehensive evaluation across different aspects. For a given model on a dataset \\(\\mathcal{D}\\)," + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "we employ standard metrics that quantify the discrepancy between the predicted label \\(\\hat{y}_i\\) and the true label \\(y_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.084, + 0.925, + 0.272 + ], + "angle": 0, + "content": "Evaluation on A Single Task. For classification tasks, Accuracy (or Error Rate) is commonly employed as the primary metric. AUC and F1 scores are further used to address imbalanced label distributions, while Expected Calibration Error (ECE) [173], [174] calculates the weighted average error of the estimated probabilities. All criteria are the higher, the better, except the error rate and ECE. For regression tasks, common metrics include Mean Squared Error (MSE), Mean Absolute Error (MAE), and Root Mean Squared Error (RMSE), with MAE and RMSE sharing the scale of the original labels. Lower values denote superior performance. Additionally, the coefficient of determination \\((\\mathbb{R}^2)\\) is employed, with higher values indicating a better fit." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.273, + 0.925, + 0.359 + ], + "angle": 0, + "content": "In tabular machine learning, the diversity of datasets makes it difficult for any single model to consistently excel across all scenarios. 
Therefore, evaluating models requires not only assessing their performance on individual datasets but also employing aggregated metrics that capture their overall effectiveness across multiple datasets."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.503,
+ 0.36,
+ 0.926,
+ 0.651
+ ],
+ "angle": 0,
+ "content": "Evaluation on A Set of Tasks. Early research predominantly relied on Average Rank (Friedman Rank) [12], [39], often used in conjunction with Critical Difference Comparisons, to evaluate model performance across multiple datasets. Models are ranked per dataset based on a chosen metric (e.g., accuracy, AUC, RMSE), and the average rank is computed across datasets. To ensure statistical robustness, hypothesis tests were employed to assess the significance of ranking differences, providing a more reliable comparative analysis. For multiple comparisons, tests such as the Wilcoxon-Holm, Friedman, and Nemenyi tests are employed [175]. To address the potential degradation of average rank by poor performance on some datasets, the Probability of Achieving the Maximum Accuracy (PAMA) [12] is defined as the fraction of datasets in which a model attains the highest accuracy. An alternative to PAMA accounts for near-optimal performance, \\( P95 \\) quantifies the likelihood of a model attaining at least \\( 95\\% \\) of the maximum accuracy, which is computed as the ratio of datasets where the classifier achieves at least \\( 95\\% \\) of the maximum accuracy to the total number of datasets."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.503,
+ 0.652,
+ 0.926,
+ 0.856
+ ],
+ "angle": 0,
+ "content": "As research progressed, more diverse evaluation metrics were introduced. The Arithmetic Mean of a chosen metric provides a direct comparison across datasets, but variations in the scales of evaluation metrics across datasets can distort results. 
To mitigate this issue, performance metrics are often normalized before aggregation, with normalized Accuracy applied to classification tasks and normalized RMSE (nRMSE) used for regression [36], [34]. Depending on the evaluation framework, Mean Normalized Error can be used, but its dependence on normalization can hinder independent optimization. To further address these limitations, the Shifted Geometric Mean (SGM) error was introduced, which aggregates errors multiplicatively, reducing sensitivity to extreme values and ensuring more stable cross-datasets/splits comparisons [34]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.856, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Beyond absolute performance, relative comparisons are also important. The Relative Improvement metric quantifies a model's performance gain over a baseline (e.g., a simple MLP), offering insight into efficiency relative to simpler alternatives [176]. More recently, drawing inspiration from the ELO rating system[177], [178], ELO-based evaluation has" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.114 + ], + "angle": 0, + "content": "been introduced [179], modeling model-to-model comparisons as pairwise competitions across datasets. The ELO Score iteratively adjusts rankings based on relative performance, providing a more dynamic, fine-grained assessment." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.133, + 0.37, + 0.146 + ], + "angle": 0, + "content": "2.5 Tabular Benchmarks and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.152, + 0.493, + 0.197 + ], + "angle": 0, + "content": "This section introduces existing benchmarks and datasets, along with associated considerations for constructing the benchmarks and evaluation protocols." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.21, + 0.424, + 0.224 + ], + "angle": 0, + "content": "2.5.1 Popular Tabular Benchmarks and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.228, + 0.49, + 0.286 + ], + "angle": 0, + "content": "We first introduce several benchmarks based on raw features constructed from various aspects. Then, we present datasets with rich semantics, following some tabular toolboxes and evaluation protocols." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.287, + 0.492, + 0.373 + ], + "angle": 0, + "content": "Standard Benchmarks. Methods for tabular data have preferences depending on the dataset, and evaluating them on limited datasets can be easily influenced by randomness or other factors. Therefore, it's important to consider various aspects to ensure a more comprehensive and reliable benchmark evaluation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.374, + 0.493, + 0.577 + ], + "angle": 0, + "content": "A comprehensive benchmark should cover a diverse set of datasets to test the model's generalization capabilities across different tasks and feature types. The benchmark should include datasets from different task types, including binary classification, multi-class classification, and regression tasks. [12] evaluates 179 classifiers across 17 families on 121 datasets, concluding that Random Forest variants were the most likely to perform best overall. [50] explores MLPs with parameterized techniques, such as ensembling and data augmentation, over 40 classification datasets. 
Similarly, [33] demonstrates the effectiveness of MLPs, ResNets, and transformer-based models on 11 datasets. [36] conducts experiments on 45 datasets, investigating the differences between tree-based and DNN-based methods." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.579, + 0.493, + 0.753 + ], + "angle": 0, + "content": "The benchmark should cover datasets with varying sizes, including datasets with a large number of samples and features as well as smaller datasets. The diversity of dataset sizes helps evaluate the scalability and efficiency of different models. [39] includes 176 classification datasets and evaluate 19 methods, comprising 8 classical and 11 deep methods. In this study, the pre-trained TabPFN model [89] emerges as the top performer on average, even when limited to randomly sampled training sets of 3000 examples. However, limited trials for hyperparameter tuning and strict time constraints in [39] may have led to suboptimal evaluations for some deep tabular methods [180]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.753, + 0.495, + 0.945 + ], + "angle": 0, + "content": "To ensure robustness and generalization, datasets from multiple domains should be included. Common domains for tabular data include healthcare, biology, finance, education, and physics. Additionally, some datasets are derived from other domains, such as image or speech data, by feature extraction. [181] evaluates attention mechanisms and contrastive learning methods across 28 tabular datasets, comparing their performance with traditional deep learning and machine learning approaches. [44], with a particular focus on DNN-based models, uses a benchmark of over 300 tabular datasets spanning a wide range of task types, sizes, and domains. A more diverse collection allows us to assess whether a tabular method can generalize across applications." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.418 + ], + "angle": 0, + "content": "Semantic-Enriched Datasets. In addition, recent research has also focused on evaluating tabular data with rich semantics, such as incorporating meta information related to tasks or integrating attribute names. UniTabE [182] introduces a 7TB dataset containing 13 billion tabular examples for tabular pre-training, covering domains with investing, time series analysis, finance, economics, and with numerical, categorical, text data types. CM2 [79] proposes OpenTabs for crosstab pre-training, which contains an extensive collection of large-scale tables with column name semantics, including approximately 46M tabular samples. TP-BERTa [78] filters the OpenTabs for datasets with at least 10,000 samples and no more than 32 features, resulting in 101 binary classification datasets and 101 regression datasets with about 10 million samples. GTL [81] curates a collection of 384 public tabular datasets from Kaggle, which includes 176 classification and 208 regression tasks spanning a wide range of industrial domains. TabLib collects a set of 627M tables totaling 69TiB, along with 867B tokens of context [183]. TabLib was extracted from numerous file formats, including CSV, HTML, SQLite, PDF, Excel, and others, sourced from GitHub and Common Crawl. T4 (The Tremendous Tablib Trawl) [92] takes account of the inscrutable statistics and call sheets with personally identifiable information in TabLib and filters TabLib into a collection of 4M tables with 2.1B rows." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.419, + 0.927, + 0.551 + ], + "angle": 0, + "content": "Among these benchmarks and datasets, the semantic-rich ones are primarily used for pre-training LLMs on tabular data, while the others are mainly employed for evaluating standard methods. 
Besides, some toolboxes implement methods over tabular data, including those for classical methods, as well as those for deep tabular methods [184], [185], [186], [187], [188]. To establish a comprehensive tabular benchmark, several factors need to be considered, including the range of datasets and data quality." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.554, + 0.927, + 0.702 + ], + "angle": 0, + "content": "Remark 3. Recent studies have proposed alternative perspectives for tabular evaluations, such as focusing on dataset age [42], leveraging expert-level feature engineering [43], and considering dataset version [44]. Studies have also highlighted generalization in open word environments in tabular datasets [43], [109], where the distributions of training, validation, and test sets differ significantly. More discussions are in Section 9. Incorporating diverse, high-quality datasets helps build a reliable benchmark for meaningful model comparisons." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.72, + 0.704, + 0.733 + ], + "angle": 0, + "content": "2.5.2 Evaluation Protocols" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.738, + 0.925, + 0.812 + ], + "angle": 0, + "content": "Given the strong sensitivity of tabular methods to data and the additional randomness in deep methods, robust evaluation is essential. Furthermore, due to the high computational cost of some methods, it is equally important to ensure evaluation efficiency." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.812, + 0.927, + 0.945 + ], + "angle": 0, + "content": "Model Selection. Model selection on the validation set involves both hyperparameter tuning and early stopping, which are essential for reliable evaluation. Due to the large number of hyperparameters in deep methods, automated methods like Optuna [189] are commonly used to explore hyperparameters through multiple trials [33], [69]. 
During tuning, models are evaluated on the validation split, while models can also be trained with multiple random seeds, providing more reliable evaluations. In each trial and the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.098 + ], + "angle": 0, + "content": "final training, early stopping [190] often employed to prevent overfitting, and the epoch with the best validation performance is selected as the final model." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.098, + 0.493, + 0.302 + ], + "angle": 0, + "content": "Performance Evaluation. To assess generalization and prevent overfitting, models are typically evaluated using separate train/val/test splits, with a typical split ratio of \\(64\\% / 16\\% / 20\\%\\). However, such fixed splits may yield inconsistent results. With the rise of deep learning, researchers have proposed more robust evaluation protocols to better reflect model capabilities [191]. Two main approaches are commonly used: (1) fixing the data split and running multiple trials with different random seeds [54], [59], [105], [69], [62], [87], [33], [58], [192], [65], [71]; and (2) using cross-validation, where new train/val/test splits are generated in each fold [63], [89], [193], [68], [34]. A hybrid strategy combining both random seeds and cross-validation is also adopted [194]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.302, + 0.493, + 0.522 + ], + "angle": 0, + "content": "Recent studies show that holdout-based hyperparameter tuning can be unstable and prone to overfitting to the validation set [195], [180]. 
[180] found it ineffective on most TabZilla [39] datasets and instead used 5-fold cross-validation for more robust hyperparameter selection. As a result, they found the key meta-feature findings reported in [39] no longer held. This observation was also discussed in [44], which further identified meta-features that have a greater impact on model performance. For small datasets, alternative strategies have been proposed [196], [197], [198]. However, this approach significantly reduces the efficiency of hyperparameter search. [199] showed that simply reshuffling data splits can often improve generalization, making holdout selection competitive with cross-validation while remaining more computationally efficient." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.545, + 0.406, + 0.56 + ], + "angle": 0, + "content": "3 FROM CLASSICAL TO DEEP METHOD" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.565, + 0.491, + 0.61 + ], + "angle": 0, + "content": "We present possible advantages of deep learning for tabular data, as well as the potential challenges of deep learning when compared with tree-based methods." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.631, + 0.441, + 0.646 + ], + "angle": 0, + "content": "3.1 Advantages of deep representation learning" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.651, + 0.493, + 0.68 + ], + "angle": 0, + "content": "Deep tabular models offer several advantages beyond performance when compared with classical methods." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.681, + 0.493, + 0.825 + ], + "angle": 0, + "content": "Ability to Model Complex Feature Interactions. DNNs are particularly adept at capturing high-order, non-linear interactions between features, which may be challenging for traditional models like linear regression or decision trees [51], [54]. 
By learning a hierarchical representation of features, DNNs allow low-level feature interactions to be captured in the initial layers, while higher-order interactions are identified in deeper layers. This ability to automatically learn complex relationships makes DNNs highly effective in capturing intricate dependencies within tabular data." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.826, + 0.493, + 0.945 + ], + "angle": 0, + "content": "End-to-End Learning. Unlike traditional machine learning methods, which often involve separate steps for feature engineering, preprocessing, and model tuning, DNNs can process raw features and automatically extract useful representations without complex manual transformations. This end-to-end learning approach reduces human bias and simplifies the workflow, making the process more efficient. DNNs are trained through gradient optimization, enabling" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.272 + ], + "angle": 0, + "content": "a unified, streamlined solution for complex tasks [33], [107]. Additionally, deep models support multi-task learning, allowing related tasks to benefit from shared representations, enhancing both performance and efficiency [200], [70], [49]. Integration with Other Modalities. Deep tabular methods excel in multi-modal pipelines, where tabular data is integrated with other modalities, such as images, audio, or text. In AI4science applications, for instance, tabular data might be combined with image data [106], [107] (e.g., in medical imaging applications) or time-series data [201], [202] (e.g., in forecasting tasks). DNNs are well-suited to model interactions between heterogeneous data types, improving the overall performance. By jointly learning from multiple data sources, DNNs enhance their ability to make more accurate and comprehensive predictions across domains." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.272, + 0.927, + 0.446 + ], + "angle": 0, + "content": "Flexibility with Dynamic Environments. DNN-based methods benefit from the flexibility of gradient-based optimization, which allows efficient and iterative training. This flexibility makes DNNs adaptable to changing objectives without significant modifications, unlike tree-based models that often require specialized methods for different tasks [9]. Moreover, DNNs excel in dynamic environments, such as real-time predictions, financial analysis, and decision-making systems, where feature relationships may shift. This adaptability makes them suitable for online learning or incremental training, where new data is continuously integrated without retraining from scratch [203], [204]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.447, + 0.925, + 0.622 + ], + "angle": 0, + "content": "Long-Term Knowledge Transfer and Learning. DNNs are capable of long-term learning and knowledge transfer, which allows them to retain valuable knowledge gained from training on diverse tasks [205]. Once trained on a broad set of tasks, DNNs can transfer this knowledge to related domains, reducing the need for complete retraining [206]. This is especially advantageous in fields like AI4science, where a model trained on one type of scientific data can be adapted to other related domains, saving both time and computational resources. This ability to transfer knowledge across tasks is a key advantage of deep learning, enabling more efficient use of data and model capabilities over time." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.634, + 0.91, + 0.648 + ], + "angle": 0, + "content": "3.2 Debates between Tree-Based Methods and DNNs" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.652, + 0.925, + 0.768 + ], + "angle": 0, + "content": "Although deep tabular methods have shown great potential in learning semantic representations and constructing nonlinear predictors, their initial performance often struggles to surpass that of classical tree-based ensemble methods, such as Gradient Boosted Decision Trees (GBDT). Many studies still treat GBDT approaches as strong baselines [36], [39], and in some cases, the advantages of deep tabular methods diminish as the number of evaluation datasets increases." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.769, + 0.925, + 0.797 + ], + "angle": 0, + "content": "Several reasons contribute to why tree-based methods retain their advantages over DNNs in many tabular tasks:" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.798, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Better Handling of High-Frequency Data. Tree-based methods, particularly GBDT models, are highly efficient at handling high-frequency data or dense datasets with many small variations [38]. These models build decision trees by recursively splitting the data at the most informative feature points, capturing both local and global patterns efficiently. DNNs, on the other hand, may not capture fine-grained patterns as effectively without extensive regularization or tuning [207], [208]. To address this limitation, [38] introduced frequency reduction as an inductive bias through" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "8" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.053, + 0.493, + 0.099 + ], + "angle": 0, + "content": "the addition of scaling layers, while [45] demonstrated that periodic activation functions can significantly enhance neural networks' ability to learn high-frequency functions." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.106, + 0.493, + 0.268 + ], + "angle": 0, + "content": "Natural Handling of Mixed Data Types. Tabular data often includes a combination of numerical, categorical, and ordinal features [9], [44], [209]. Tree-based models are particularly strong when working with mixed data types, as they can handle categorical features directly without requiring one-hot encoding or embeddings. This ability to work with raw categorical data simplifies the preprocessing pipeline significantly. DNNs, however, generally require encoding techniques (e.g., one-hot encoding or learned embeddings) for categorical features, adding complexity and potentially leading to suboptimal performance [63]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.275, + 0.493, + 0.436 + ], + "angle": 0, + "content": "Lower Computational Requirements for Training and Inference. For certain tasks, tree-based models tend to be more computationally efficient than DNNs [33]. GBDTs and other decision tree-based models can train relatively quickly and are less computationally intensive than deep neural networks [210], [39]. This is especially true when the dataset is not massive or when the model needs to be trained and deployed rapidly. DNNs, on the other hand, often require significant computational resources (e.g., GPUs, longer training times) to achieve comparable performance, making them less ideal in resource-constrained environments [211], [88]." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.444, + 0.493, + 0.576 + ], + "angle": 0, + "content": "Robustness to Noisy and Missing Data. Tree-based models are generally more robust to noisy data and missing values. When training a decision tree, missing values can be handled through optimal splitting that accommodates absent data, and trees can effectively deal with noisy or inconsistent data points [36]. DNNs, in contrast, are more sensitive to noise and often require careful preprocessing or specific techniques (e.g., data imputation or noise filtering) to avoid performance degradation with noisy or missing data [65], [89]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.585, + 0.495, + 0.803 + ], + "angle": 0, + "content": "Interpretability and Transparency. Tree-based models offer a significant advantage in terms of interpretability [60], [61], [105]. The decision-making process of models like GBDT can be easily visualized in the form of decision paths, and feature importance can be directly extracted [130], [132], [131]. This transparency makes tree-based models appealing in domains where model explainability is crucial, such as in finance, healthcare, and regulated industries. Although interpretability techniques like LIME [212] and SHAP [213] exist for DNNs, tree-based models still tend to be more intuitive and easier to explain, especially in complex decision-making environments. Recent works [214], [60], [59], [193] have sought to bridge this gap by enhancing neural network interpretability through emulation of tree-based model behaviors." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.811, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Handling Outliers and Skewed Data. Tree-based methods are often better at handling outliers and skewed distributions in the data. 
When a feature exhibits extreme values or skewed distributions, decision trees are inherently less sensitive to such anomalies because they create splits based on feature ranges that naturally isolate outliers. This characteristic can make them more robust than DNNs, which may require specialized loss functions or techniques (e.g., robust scaling or outlier removal) to handle such data points [43], [109]." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.053, + 0.857, + 0.068 + ], + "angle": 0, + "content": "4 TAXONOMY OF SPECIALIZED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.072, + 0.924, + 0.188 + ], + "angle": 0, + "content": "Similar to the evolution of deep learning, which progresses from specialized learning to transfer learning and ultimately to foundation models [244], we categorize deep tabular methods into three groups, as shown in Figure 2: specialized methods, transferable methods, and general methods. This classification reflects both the evolutionary development of deep learning techniques and the increasing generalization capabilities of these models." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.188, + 0.925, + 0.363 + ], + "angle": 0, + "content": "Specialized methods, being the earliest developed and most widely used category, will be our starting point for discussion. Tabular data consists of features (columns), samples (rows), and objectives (labels), which together define the structure and the task objectives. We emphasize detailed strategies for obtaining high-quality representations at both feature- and sample-level for the target task. Specifically, given the input data, according to the general learning objective in Equation 1, we consider how to transform the tabular input \\( x_{i} \\) (feature aspect), how to construct relationships between samples (sample aspect), how to design the objective \\( \\ell(\\cdot) \\) and regularize \\( \\Omega(\\cdot) \\) (objective aspect). 
In particular," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.364, + 0.925, + 0.465 + ], + "angle": 0, + "content": "- Feature Aspect. We focus on how to transform the raw tabular input (in various forms) into intermediate representations. We consider two types of features: numerical and categorical. By explicitly modeling the relationships between the two features (e.g., feature importance and interactions), we are able to enhance the model's understanding of the input space." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.465, + 0.925, + 0.553 + ], + "angle": 0, + "content": "- Sample Aspect. In addition to features, we explore how to retrieve and utilize neighboring samples to capture intersample dependencies, thereby improving predictions. In order to improve the model's ability to make predictions, we explore the relationships between a target sample and its \"extracted neighbors.\"" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.553, + 0.925, + 0.64 + ], + "angle": 0, + "content": "- Objective Aspect. We examine how to modify the loss function and overall objective to introduce inductive biases. By directly guiding the learning process with the target variables, we incorporate prior knowledge or task-specific preferences into the model, thereby improving its generalizability and interpretability." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.64, + 0.924, + 0.772 + ], + "angle": 0, + "content": "In specialized methods, we focus solely on learning from pure data, excluding feature semantics considered in transferable methods (in Section 6), as they leverage the capabilities of language models. Since specialized methods encompass a wide range of approaches, and feature-aspect methods are the most extensive part of them, we will first introduce sample-aspect methods and objective-aspect methods in the following subsections. In Section 5, we will provide a detailed introduction to feature-aspect methods." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.786, + 0.816, + 0.801 + ], + "angle": 0, + "content": "4.1 Sample-aspect Specialized Methods" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.804, + 0.924, + 0.892 + ], + "angle": 0, + "content": "Sample interaction methods take a retrieval-based approach, focusing on relationships between individual samples rather than features. In a tabular dataset, each sample \\( x_{i} \\) represents a row with \\( d \\) features, and the goal is to leverage relationships between a target sample and its \"extracted neighbors\" to improve predictions." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.892, + 0.924, + 0.921 + ], + "angle": 0, + "content": "The general form for the sample interaction methods can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.636, + 0.927, + 0.924, + 0.945 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {i} = f \\left(\\mathcal {R} \\left(\\boldsymbol {x} _ {i}, \\mathcal {D}; \\Phi\\right)\\right), \\tag {2}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.072, + 0.049, + 0.925, + 0.08 + ], + "angle": 0, + "content": "Table 1: The taxonomy of representation learning for tabular data. The shade color in the last column denotes the subcategory, which is consistent with Figure 3." + }, + { + "type": "table", + "bbox": [ + 0.126, + 0.089, + 0.869, + 0.328 + ], + "angle": 0, + "content": "
Algorithm CategoryReference
Specialized Methods§ 5Feature-aspect MethodsFeature Encoding[33], [45], [64]
Feature Selection[59], [60], [105], [61], [193]
Feature Projection[52], [33], [34], [58]
Feature Interaction[54], [62], [63], [55], [65], [49], [215]
§ 4.1Sample-aspect MethodsSample Interaction[70], [216], [217], [192], [67]
Neighbor Retrieval[218], [68], [69], [35]
§ 4.2Objective-aspect MethodsTraining Objective[67]
Training Regularization[219], [50], [66]
§ 6Transferable MethodsHomogeneous[63], [48], [70], [220], [46], [221], [222], [223], [47], [224], [225], [226], [227]
Heterogeneous[228], [229], [222], [72], [73], [64], [230], [231]
Language Model[77], [232], [182], [79], [78], [233], [234], [82], [83], [235], [236], [80], [237]
Vision Model[238], [239], [240], [74], [75], [241], [242], [76]
§ 7General MehtodsRaw-Feature-based[86], [87], [88]
TabPFN Variants[89], [91]
Semantics-based[92], [93], [94], [243]
" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.334, + 0.909, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.718, + 0.924, + 0.749 + ], + "angle": 0, + "content": "Figure 3: The roadmap of deep representation learning tabular methods. We organize representative methods chronologically to show the concentration at different stages. Different colors of these methods denote the sub-categories." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.773, + 0.491, + 0.864 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}\\) is the set of all samples (training data) available for retrieval or learning. \\(\\mathcal{R}(\\cdot)\\) is the sample interaction module, which retrieves or aggregates information from relevant samples in \\(S\\) for the target sample \\(\\boldsymbol{x}_i\\). \\(\\Phi\\) represents the learnable parameters of \\(\\mathcal{R}\\). \\(f(\\cdot)\\) is the prediction head that maps the aggregated information to the final output \\(\\hat{y}_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.87, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Sample aspect approaches can be broadly categorized into two main strategies. The first approach introduces the modeling of sample relationships \\(\\mathcal{R}\\) during representation training, allowing the model to learn better representations by capturing inter-sample dependencies. The second ap" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.773, + 0.924, + 0.819 + ], + "angle": 0, + "content": "proach is retrieval-based models, which directly predict outcomes by learning how to retrieve and utilize neighbors' relationships \\(\\mathcal{R}\\) when testing." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.822, + 0.923, + 0.896 + ], + "angle": 0, + "content": "Sample Interaction. 
These methods assist in representation learning by allowing the model to capture relationships between samples, which in turn helps generate a more robust representation during training. During testing, the model becomes more sensitive to each sample without interaction." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.923, + 0.944 + ], + "angle": 0, + "content": "SAINT [70] introduces inter-sample attention beyond inter-attribute attention, which improves row classification by relating each row to others in the table. NPT [216]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "10" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.228 + ], + "angle": 0, + "content": "extends this via non-parametric Transformers, whereas Hopular [217] employs Hopfield networks, sharing conceptual alignment with SAINT [70]. Unlike nearest-neighbor classification, the distance metric is learned end-to-end. Prompt [192] posits that the feature importance in tabular data is sample-dependent. During feature extraction, it treats the information between samples as prompts. PTaRL [67] identifies two issues in the representation of tabular data samples: entanglement and localization. It addresses these by modeling global sample relationships through prototype generation and representation projection, helping the model produce clear and consistent decisions." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.228, + 0.492, + 0.302 + ], + "angle": 0, + "content": "Neighbor Retrieval. These methods construct high-quality contexts to aid prediction by retrieving valuable neighbors and designing efficient ways to utilize them based on the relationships between samples. The training data is used to assist during testing." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.302, + 0.495, + 0.565 + ], + "angle": 0, + "content": "DNNR [68] argues that a key advantage of neighbor-based methods is the model's transparency, meaning that the model's decisions can be explained by inspecting its components. It enhances predictive performance by incorporating local gradient estimation and Taylor series approximation into the KNN framework. TabR [69] proposes that, compared to purely parametric (e.g., retrieval-free) models, retrieval-based models can achieve superior performance while also exhibiting several practically important properties, such as the ability for incremental learning and enhanced robustness. It encodes all candidate samples and then employs an attention-like mechanism to retrieve the samples that aid in making predictions, as explored in [218]. ModernNCA [35] revitalizes the classic tabular prediction method, Neighbourhood Component Analysis (NCA) [245], by designing and incorporating deep learning architectures and strategies. The resulting method efficiently leverages neighboring samples for prediction." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.567, + 0.495, + 0.582 + ], + "angle": 0, + "content": "Remark 4. The neighborhood-based approach closely resembles" + }, + { + "type": "text", + "bbox": [ + 0.095, + 0.582, + 0.495, + 0.729 + ], + "angle": 0, + "content": "bles the current in-context learning [246] mechanism. In particular, the in-context learning used in general models like TabPFN [89], [91] can aslo be considered a form of the neighborhood method. This concept of neighborhood not only helps in standard tasks, but also enhances transferable and general methods. For example, LoCalPFN [90] highlights that employing local linear regression can lead to more expressive decision boundaries, while utilizing local context allows performance to scale with the size of the training dataset." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.749, + 0.4, + 0.765 + ], + "angle": 0, + "content": "4.2 Objective-aspect Specialized Methods" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.767, + 0.492, + 0.842 + ], + "angle": 0, + "content": "The general objective learning \\( f \\) follows the structural risk minimization as in Equation 1, where \\( \\ell \\) is the loss function to set the training objective between the prediction and the ground truth label. \\( \\Omega(\\cdot) \\) is the regularization on the model, which directs the objective or restricts the complexity of \\( f \\)." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.841, + 0.493, + 0.945 + ], + "angle": 0, + "content": "In traditional machine learning, models often rely on explicit regularization techniques on \\(\\Omega\\) to ensure good generalization. Methods such as decision trees, support vector machines, and linear models typically incorporate regularization terms directly into the loss function to control model complexity and prevent overfitting. For example, in linear regression, regularization methods like L1 (Lasso) [247], L2" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.056, + 0.721, + 0.135 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.15, + 0.671, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.056, + 0.917, + 0.131 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.15, + 0.905, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.245, + 0.925, + 0.29 + ], + "angle": 0, + "content": "Figure 4: Illustration of feature-aspect methods, including feature encoding, feature selection, feature projection and feature interaction." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.305, + 0.924, + 0.351 + ], + "angle": 0, + "content": "(Ridge) [248], or Elastic-Nets [249] penalize large coefficients, effectively controlling the complexity of the model and helping to maintain a balance between bias and variance." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.352, + 0.925, + 0.44 + ], + "angle": 0, + "content": "Objective-aspect methods in deep learning are an extension of these traditional regularization techniques, where inductive bias is introduced by adjusting the loss function \\(\\ell\\) or adding regularizers \\(\\Omega\\). In the training process, the goal is to leverage regularization on the model to improve predictions." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.457, + 0.925, + 0.515 + ], + "angle": 0, + "content": "Remark 5. Pre-train methods such as homogeneous transferable tabular methods in Section 6 also change the loss function \\(\\ell\\) or the regularization \\(\\Omega\\) to help pre-training. We will discuss these methods later." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.543, + 0.924, + 0.618 + ], + "angle": 0, + "content": "Objective-aspect approaches can be broadly categorized into two main strategies. The first approach involves training objectives, which enhance the model with a specialized ability. The second approach introduces a regularizer, allowing the model to learn strong generalized representations." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.619, + 0.924, + 0.708 + ], + "angle": 0, + "content": "Training Objective. For training objectives, PTaRL [67] constructs prototype-based projection space and learns the disentangled representation around global prototypes. PTaRL uses a diversification constraint for representation calibration and introduces a matrix orthogonalization constraint to ensure the independence of global prototypes." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.709, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Training Regularization. For training regularization, RLNs [219] overcome the challenge of an intractable number of hyperparameters during training by introducing an efficient tuning scheme, which minimizes a new \"Counterfactual Loss.\" In RLNs, the regularization coefficients are optimized together with learning the network weight parameters. RLNs produce extremely sparse networks, thus providing more interpretable models and revealing the importance that the network assigns to different inputs. [50] introduces \"cocktails,\" dataset-specific combinations of 13 regularization techniques, showing that even simple neural networks can outperform tree-based architectures when optimized with these methods. TANGOS [66] introduces a regularization-based improvement. It regularizes neuron attributions to encourage neurons to specialize and become orthogonal to one another." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "11" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.053, + 0.453, + 0.068 + ], + "angle": 0, + "content": "5 FEATURE-ASPECT SPECIALIZED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.072, + 0.493, + 0.189 + ], + "angle": 0, + "content": "Tabular data is characterized by a diverse set of features, including both categorical and numerical variables. The complexity of tabular data arises from the variety of feature types, their interrelationships, and the high dimensionality often present. Traditional methods often rely on manual feature engineering, using techniques such as encoding categorical variables and selecting relevant features to improve model performance and reduce overfitting." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.19, + 0.493, + 0.336 + ], + "angle": 0, + "content": "As deep learning has evolved, these traditional techniques have been integrated and expanded upon. Deep tabular models are capable of automatically learning complex feature representations, reducing the need for explicit feature engineering. Feature-aspect methods, such as feature encoding, selection, projection, and interaction, are essential for transforming raw tabular inputs into more informative intermediate forms. These methods help improve a model's ability to capture intricate relationships between features, thereby enhancing its generalization capabilities." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.355, + 0.248, + 0.37 + ], + "angle": 0, + "content": "5.1 Feature Encoding" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.373, + 0.493, + 0.447 + ], + "angle": 0, + "content": "Various encoding strategies have been explored for both categorical and numerical features in tabular data. Additionally, with the advancement of the attention mechanism, feature tokenization, similar to word embeddings in natural language processing, transforms all features into embeddings." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.448, + 0.492, + 0.534 + ], + "angle": 0, + "content": "Categorical Encoding. Categorical variables represent types of data which may be divided into groups. Examples of categorical variables are race, sex, age group, and educational level [250]. The categorical features are usually transformed in an index (integer). The two most popular techniques are an Ordinal Encoding and a One-Hot Encoding." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.535, + 0.493, + 0.71 + ], + "angle": 0, + "content": "Ordinal Encoding assigns each unique category a distinct integer value. 
This approach is useful when the categorical variable has an inherent order, such as \"low,\" \"medium,\" and \"high.\" The main advantage of Ordinal Encoding is its simplicity and efficiency, as it transforms the categorical variable into a single numeric column. However, it assumes that there is an ordinal relationship between the categories, which may not always be the case. For instance, if the categorical variable represents \"color\" with categories such as \"red,\" \"blue,\" and \"green,\" applying Ordinal Encoding would introduce an artificial order that does not reflect any meaningful ranking." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.71, + 0.493, + 0.914 + ], + "angle": 0, + "content": "On the other hand, One-Hot Encoding creates a new binary column for each unique category in the original categorical variable. For example, for a variable \"color\" with three categories (red, blue, and green), One-Hot Encoding would generate three binary columns: \"is_red,\" \"is_green,\" and \"is_green,\" encoding red as \\((1,0,0)\\), blue as \\((0,1,0)\\) and green as \\((0,0,1)\\). Each column indicates the presence or absence of that particular category. One-Hot Encoding is useful for nominal categorical variables, where no order exists between the categories. While One-Hot Encoding avoids the assumption of ordinal relationships, it can lead to a high-dimensional feature space if the categorical variable has many unique values, which may result in increased computational costs and potential issues with overfitting." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.914, + 0.493, + 0.944 + ], + "angle": 0, + "content": "In some cases, more advanced encoding techniques are used to address the limitations of these basic approaches." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.156 + ], + "angle": 0, + "content": "For example, Target Encoding assigns each category a value based on the mean of the target variable for that category. 
This method can be useful when there is a strong relationship between the categorical features and the target. In Leave-one-out embedding, every category is replaced with the mean of the target variable of that category, which excludes the current row to avoid overfitting." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.156, + 0.925, + 0.39 + ], + "angle": 0, + "content": "Numerical Encoding. For encoding, MLP-PLR [45] introduces two numerical encoding methods: Piecewise Linear Encoding (PLE) and Periodic Activation Functions. These encoding methods can be integrated with other differentiable layers (e.g., Linear, ReLU) to enhance performance. PLE produces alternative initial representations for the original scalar values and is based on feature binning. Periodic Activation Functions take into account the fact the embedding framework where all features are computed independently of each other forbids mixing features during the embedding process and train the pre-activation coefficients instead of keeping them fixed. [38] utilizes tools from spectral analysis, showing that functions described by tabular datasets often have high irregularity, and can be smoothed by transformations such as scaling and ranking to improve performance. They propose \"frequency reduction\" as an inductive bias during training." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.39, + 0.925, + 0.478 + ], + "angle": 0, + "content": "Feature Tokenization. Feature tokenizer performs a similar role to the feature extractor in traditional models. It transforms the input features to embeddings [62], [33]. Since the feature representations of features are very sparse and high-dimensional, a common way is to represent them into low-dimensional spaces (e.g., word embeddings)." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.478, + 0.925, + 0.507 + ], + "angle": 0, + "content": "The general form for feature tokenization can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.508, + 0.923, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {T} _ {i, j} = \\boldsymbol {b} _ {j} + \\mathcal {T} \\left(x _ {i, j}; \\Psi\\right) \\in \\mathbb {R} ^ {t}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.534, + 0.925, + 0.607 + ], + "angle": 0, + "content": "where \\(\\mathcal{T}(\\cdot)\\) is the feature tokenizer module, which transforms the input feature vector \\(\\pmb{x}_i\\in \\mathbb{R}^d\\) to a token embedding \\(T_{i,j}\\in \\mathbb{R}^t\\) . \\(t\\) is the dimension of token embedding. \\(\\pmb{b}_{j}\\) is the \\(j\\) -th feature bias. \\(\\mathcal{T}\\) can be implemented with different forms. \\(\\Psi\\) represents the learnable parameters of \\(\\mathcal{T}\\)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.608, + 0.925, + 0.945 + ], + "angle": 0, + "content": "In AutoInt [62], both the categorical and numerical features are embedded into low-dimensional spaces, which reduces the dimension of the input features and meanwhile allows different types of features to interact with each other. The embeddings of categorical features are computed by multiplying the embedding matrix with the multi-hot vector, while a corresponding embedding vector represents numerical features. TabTransformer [63] embed each categorical feature into a parametric embedding of dimension \\(t\\) using Column embedding. An embedding vector is assigned to each feature, and a set of embeddings is constructed for all categorical features. Unlike TabTransformer, SAINT [70] proposes projecting numerical features into a \\(t\\)-dimensional space before passing their embedding through the transformer encoder. 
FT-Transformer [33] adapts the Transformer architecture for tabular data, where all features are transformed to embeddings and applies a stack of Transformer layers to the embeddings. Specifically, the numerical tokenizer is implemented as the element-wise multiplication \\(\\boldsymbol{T}_i^{\\mathrm{num}} = \\boldsymbol{b}_i^{\\mathrm{num}} + x_i^{\\mathrm{num}} \\cdot \\boldsymbol{W}_i^{\\mathrm{num}}\\), and the categorical tokenizer is implemented as the lookup table \\(\\boldsymbol{T}_i^{\\mathrm{cat}} = \\boldsymbol{b}_i^{\\mathrm{cat}} + \\boldsymbol{e}_i^T \\boldsymbol{W}_i^{\\mathrm{cat}}\\), where \\(\\boldsymbol{e}_i^T\\) is a one-hot vector for the corresponding categorical feature. Other transformer-based" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "12" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.053, + 0.493, + 0.083 + ], + "angle": 0, + "content": "methods, like [65], [72], [230], [215], use the same feature tokenizer as FT-Transformer." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.101, + 0.248, + 0.115 + ], + "angle": 0, + "content": "5.2 Feature Selection" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.118, + 0.493, + 0.207 + ], + "angle": 0, + "content": "The high dimensionality of tabular data often causes overfitting, where the model focuses on irrelevant features and neglects the important ones. Feature selection reduces the number of features, retaining only the most valuable information. This helps prevent overfitting, improves generalization, and reduces computational complexity." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.208, + 0.493, + 0.337 + ], + "angle": 0, + "content": "Traditional tree-based models facilitate automatic feature selection by evaluating the impact of each feature on the target during the construction process. Decision trees utilize metrics such as information gain or the Gini index for feature selection, while ensemble methods like random forests determine feature importance by assessing each feature's contribution [251], [252], [253]. Recently, modern deep learning methods for tabular data often mimic trees' structures for feature selection." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.338, + 0.493, + 0.497 + ], + "angle": 0, + "content": "GrowNet [59] and NODE [60] primarily mimic ensemble techniques. Inspired by GBDT, GrowNet designs a framework for building DNNs with multiple weak learners, where each learner's input consists of the original features plus the penultimate layer output from the previous learner. NODE uses a differentiable Oblivious Decision Tree as the base model, applying Bagging within each layer and Stacking across layers in a multi-layered structure. To make GAM [254] scalable and effective, NODE-GAM [61] modifies NODE to be a GAM, allowing GAM to learn quick, nonlinear jumps that better match patterns in real data." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.498, + 0.493, + 0.675 + ], + "angle": 0, + "content": "TabNet [105] and GRANDE [193] focus more on how tree models handle features. TabNet not only retains the representation learning capabilities of DNNs through self-supervised learning, but also incorporates the interpretability of tree models and the benefits of sparse feature selection, with a model structure designed for both feature selection and computation. GRANDE argues that the hard splits used by tree models are a key advantage over deep models, and thus proposes a method for learning hard, axis-aligned tree ensembles using gradient descent. 
GRANDE combines the beneficial inductive bias of axis-aligned splits with the flexibility provided by gradient descent optimization." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.691, + 0.253, + 0.706 + ], + "angle": 0, + "content": "5.3 Feature Projection" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.71, + 0.491, + 0.812 + ], + "angle": 0, + "content": "Feature projection methods aim to project the raw data into a middle form, enhancing the representation ability for later architectures. Feature projection methods can be broadly categorized into two main approaches: MLP variants and special designed architectures. These approaches aim to enhance the model's ability to represent complex features for underlying feature structures." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.812, + 0.493, + 0.945 + ], + "angle": 0, + "content": "MLP Variants. For model architecture, RTDL [33] investigates both ResNet-like and Transformer-based architectures tailored for tabular data, proposing simple yet effective adaptations of these widely-used deep models. In particular, the MLP architecture is constructed by stacking multiple blocks consisting of Linear layers, ReLU activations, and Dropout, which transform the raw tabular features into a fixed-dimensional hidden representation. A final linear layer is then used as the classification head. The paper highlights" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.924, + 0.098 + ], + "angle": 0, + "content": "an important insight: with proper hyperparameter tuning, even simple architectures like MLP and ResNet can achieve competitive performance on tabular benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.098, + 0.925, + 0.315 + ], + "angle": 0, + "content": "Another contemporaneous work [50] enhances the MLP architecture by equipping it with a comprehensive suite of modern regularization techniques. 
Instead of introducing architectural innovations, this study focuses on systematically exploring combinations of 13 different regularization methods to identify an effective \"regularization cocktail\" for plain MLPs. The results demonstrate two key findings: (i) a well-regularized vanilla MLP can significantly outperform many recent, specialized neural architectures designed for tabular data; and (ii) such MLPs can even surpass strong traditional machine learning models like XGBoost across a range of benchmarks. For a more comprehensive strategy, RealMLP [34] explores multiple aspects including preprocessing, hyperparameters, architecture, regularization, and initialization." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.316, + 0.927, + 0.492 + ], + "angle": 0, + "content": "Special Designed Architectures. For units, motivated by the observation that normalization techniques are prone to disturbances during training, SNN [52] proposes the Scaled Exponential Linear Unit (SELU) to improve deep models for tabular data. NAMs [255] uses exp-centered (ExU) hidden units to improve the learnability for fitting jumpy functions. BiSHop [58] uses a dual-component approach, sequentially processing data both column-wise and row-wise through two interconnected directional learning modules. They use layers of generalized sparse modern Hopfield layers, a sparse extension of the modern Hopfield model with learnable sparsity." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.509, + 0.688, + 0.523 + ], + "angle": 0, + "content": "5.4 Feature Interaction" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.526, + 0.924, + 0.614 + ], + "angle": 0, + "content": "Feature interaction methods aim to model relationships among features to enhance the representation power of deep learning models on tabular data. 
In tabular datasets, each sample \\( \\boldsymbol{x}_i \\in \\mathbb{R}^d \\) is described by \\( d \\) features, and the goal is to transform these raw features into enriched representations that improve predictive performance." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.615, + 0.924, + 0.643 + ], + "angle": 0, + "content": "The general form for feature interaction methods can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.645, + 0.643, + 0.924, + 0.66 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {i} = f \\left(\\mathcal {H} \\left(\\boldsymbol {x} _ {i}; \\Theta\\right)\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.666, + 0.923, + 0.754 + ], + "angle": 0, + "content": "where \\(\\pmb{x}_i\\in \\mathbb{R}^d\\) is the input feature vector for a single instance, \\(\\mathcal{H}(\\cdot)\\) is the feature interaction module, which transforms the input \\(\\pmb{x}\\) by capturing feature dependencies or generating higher-order feature interactions. \\(\\Theta\\) represents the learnable parameters of \\(\\mathcal{H}\\). \\(f(\\cdot)\\) is the prediction head that maps the transformed representation to the final output \\(\\hat{y}\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.754, + 0.923, + 0.84 + ], + "angle": 0, + "content": "Feature interaction methods can be broadly categorized into two main approaches: the design of automatic feature interaction modules and the mining of implicit feature relationships. These approaches aim to enhance the model's ability to learn complex feature interactions and underlying feature structures within tabular data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.841, + 0.923, + 0.914 + ], + "angle": 0, + "content": "Automatic Feature Interaction Modules. These methods do not assume specific feature types within the tabular dataset. 
Instead, they focus on improving the feature interaction process, enabling the model to learn complex, high-order feature relationships autonomously." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.914, + 0.924, + 0.943 + ], + "angle": 0, + "content": "DCNv2 [54] improves the learning of the model's feature interaction by improving the \"Cross Network\" structure. It" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.258 + ], + "angle": 0, + "content": "employs low-rank methods to approximate feature crosses in subspaces and then integrates these subspaces using a gating mechanism. AutoInt [62] maps the original sparse high-dimensional feature vectors into a low-dimensional space and models high-order feature interactions by stacking interaction layers with a multi-head attention mechanism. Unlike AutoInt, the TabTransformer[63] only maps categorical features into contextual embeddings and feeds them into a Transformer model, while numerical continuous features are directly concatenated with the interacted contextual embeddings. When tabular data contains only numerical features, TabTransformer behaves in an MLP-like manner. Conversely, when the data contains only categorical features, TabTransformer operates similarly to AutoInt." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.258, + 0.493, + 0.331 + ], + "angle": 0, + "content": "Implicit Feature Relationships. Methods in this category typically assume that features in tabular data can be abstracted into implicit types and that it is necessary to design a suitable feature learning process to adapt to the characteristics of different types of features." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.331, + 0.493, + 0.609 + ], + "angle": 0, + "content": "DANets [55] propose the existence of underlying feature groups in tabular data, where features within each group are correlated. They learn to group input features and perform further feature abstraction. SwitchTab [49] introduces the idea of extracting sample-specific \"Salient Features\" and sample-shared \"Mutual Information\" in tabular features. It leverages self-supervised learning to assist in learning feature representations. ExcelFormer [65] argues that while DNN assigns weights to each feature, it does not actively exclude irrelevant features. To address this, it introduces Semi-Permeable Attention for feature interaction, which allows features with lower information content to access information from more informative features while preventing highly informative features from being influenced by less relevant ones. AMFormer [215] proposes the hypothesis that arithmetic feature interactions are crucial for deep tabular models. Based on the Transformer architecture, it introduces components designed to extract both additive and multiplicative interaction information." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.629, + 0.49, + 0.644 + ], + "angle": 0, + "content": "6 FROM SPECIALIZED TO TRANSFERABLE MODEL" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.648, + 0.493, + 0.736 + ], + "angle": 0, + "content": "Instead of training a tabular model from scratch, learning based on a Pre-Trained Model (PTM) may increase the learning efficacy and reduce the resource and data requirement. For example, in a house prices prediction task, training a regressor in a certain area may benefit from a well-trained predictor from its neighborhood." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.736, + 0.495, + 0.945 + ], + "angle": 0, + "content": "Learning by reusing the PTM usually contains two stages. 
The first is the pre-training of a tabular model, from one or more upstream tasks. Given the PTM and a downstream task, an adaptation strategy is needed to transform the PTM to the target task or facilitate the learning of the target model. Formally, a well-trained model \\( g_{\\Theta} \\) is often available and can be leveraged to facilitate the training of \\( f_{\\theta} \\) over \\( \\mathcal{D} \\). Here, \\( g_{\\Theta} \\) is pre-trained on a dataset \\( \\mathcal{D}' = \\{(x_j', y_j')\\}_{j=1}^{N'} \\) with instances \\( x_j' \\in \\mathbb{R}^{d'} \\) and labels \\( y_j' \\in [C'] \\). To reuse expert knowledge in \\( g_{\\Theta} \\), an adaptation strategy is applied: \\( f_{\\theta} = \\text{Adapt}(f_{\\theta_0} \\mid \\mathcal{D}, g_{\\Theta}) \\), where \\( \\theta_0 \\) is the initialization of the model. The notation could also be extended to cases with more than one PTM. The main challenge to reuse one or more PTMs is to bridge the gap between the PTM and the" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "target tabular model [256]. We categorize PTMs into three kinds based on the source of PTM \\( g_{\\Theta} \\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.083, + 0.923, + 0.158 + ], + "angle": 0, + "content": "Homogeneous Transferable Tabular Model. First, the PTM may come from the same form of task (with \\( d' = d \\) and \\( C' = C \\), but with different distributions \\( \\operatorname{Pr}(\\mathcal{D}') \\neq \\operatorname{Pr}(\\mathcal{D}) \\) or model families \\( g \\neq f \\)). For example, those pre-trained from other domains [71], or those unlabeled instances [48], [70]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.157, + 0.924, + 0.244 + ], + "angle": 0, + "content": "Heterogeneous Transferable Tabular Model. In addition, we consider a PTM pre-trained from a slightly different task with \\(\\mathcal{D}\\). 
In addition to the previous difference, the PTM \\(g_{\\Theta}\\) may differ from \\(f_{\\theta}\\) in feature dimension \\((d' \\neq d)\\) or target class set \\((C' \\neq C)\\), so the adaptation method \\(\\mathbf{Adapt}(\\cdot)\\) must handle such heterogeneity [64], [230]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.244, + 0.925, + 0.376 + ], + "angle": 0, + "content": "Cross-Modal Transferable Tabular Model. Moreover, the pre-trained model could also be constructed from another modality, such as vision and language domains. The cross-modality PTM is hard to be applied to the tabular prediction task in most cases, so auxiliary information from the tabular task like the semantic meaning of attributes (i.e., the attribute names) are usually assumed to be available in this case, where PTM like large language models may provide the latent semantic meanings as external knowledge [77], [73]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.376, + 0.925, + 0.697 + ], + "angle": 0, + "content": "The main limitation of the transferable tabular model is the assumption that the data distribution of the well-trained model should be similar to the distribution of the target model. For example in the previous house price prediction task, if the PTM is pre-trained in an area distance from the target area and targets diverse problems, it is hard to utilize the PTM in the target task [222]. Since different tabular tasks may vary in their distribution, feature, or classes, the general assumption is their exist a common \"dimension\" between the PTM and the target task. Only the distribution changes under the shared dimension and classes, or there exists an overlap between the feature or class spaces [230]. For example, in real-world applications such as healthcare, there are numerous medical diagnostic tables. These tables usually have some features in common such as blood type and blood pressure. 
For rare diseases with limited data, knowledge transfer from other diagnostic tables with overlapping features becomes beneficial [228]. When the feature/label semantics are available, two different tasks may be linked through the semantic space, and textual PTMs can be used to map the tabular instance to this space or facilitate the prediction in this space [80]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.697, + 0.925, + 0.844 + ], + "angle": 0, + "content": "Pros and Cons of transferable Models. Learning with a well-trained tabular model has several advantages based on the knowledge encoded in the PTM. First, the training efficiency of the target model is improved and the model may converge fast, as the PTM may provide better initialization weights or optimization paths. Then, the target model will reduce the requirement on the data size, i.e., learning with a few-shot dataset. Training based on a PTM also reduces the number of learnable parameters, leading to parameter-efficient tuning and reducing computational resources." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.864, + 0.86, + 0.88 + ], + "angle": 0, + "content": "6.1 Homogeneous Transferable Tabular Model" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.884, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Adapting a tabular model from another domain with different distributions is investigated in the field of unsupervised domain adaptation before the era of deep learning. One representative method is the biased regularization, which" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "14" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.062, + 0.482, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.21, + 0.496, + 0.286 + ], + "angle": 0, + "content": "Figure 5: Illustration of homogeneous transferable tabular methods. The pre-trained model could be constructed from supervised learning or self-supervised learning, which includes masked language model, contrastive pre-training, and hybrid methods." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.291, + 0.493, + 0.322 + ], + "angle": 0, + "content": "minimizes the difference between the weights of the PTM and the target model, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.074, + 0.326, + 0.493, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\boldsymbol {W}} \\ell (\\boldsymbol {W}) + \\| \\boldsymbol {W} - \\boldsymbol {W} ^ {\\prime} \\| _ {F} ^ {2} = \\min _ {\\Delta \\boldsymbol {W}} \\ell \\left(\\Delta \\boldsymbol {W} + \\boldsymbol {W} ^ {\\prime}\\right) + \\| \\Delta \\boldsymbol {W} \\| _ {F} ^ {2}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.36, + 0.493, + 0.506 + ], + "angle": 0, + "content": "\\(\\ell(W)\\) is the loss function on the current weights \\(W'\\), and the regularize constraint the distance between the target model \\(W\\) and the PTM weights \\(W'\\). We can reformulate the learning objective as learning the weights residual \\(\\Delta W\\). Biased regularization can be extended to the case where \\(f\\) and \\(g\\) are deep neural networks such as MLP, but it fails when the target model has a different architecture with the PTM. In this case, instead of matching two models through their weights, matching their predictions also helps. For example, twice learning [253] and knowledge distillation [257]." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.507, + 0.493, + 0.594 + ], + "angle": 0, + "content": "Benefiting from the strong capacity of deep neural networks, some recent studies focus on pre-training a tabular model from unsupervised instances, and then adapting the model via fine-tuning the PTM on the target (even few-shot) labeled examples. This strategy could be applied in standard supervised learning or semi-supervised learning." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.594, + 0.493, + 0.725 + ], + "angle": 0, + "content": "Supervised Pre-training Objectives. A straightforward way to incorporate the target variable into the pre-training is by using the input corruption as an augmentation for the standard supervised learning objective. [71] identifies practices to pre-train tabular deep learning models that can be universally applied to different datasets and architectures. They show that using the object target labels during the pre-training stage benefits the downstream performance and advocates several target-aware pre-training objectives." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.725, + 0.493, + 0.783 + ], + "angle": 0, + "content": "Self-Supervised Pre-training Objectives. The self-supervised pre-training objectives can be mainly categorized into three categories, including the masked language model, contrastive pre-training, and hybrid methods." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.783, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Masked Language Model (MLM). MLM is the unsupervised pre-training objective, where a random subset of features is masked for each sample, and the masked values are predicted in a multi-target classification manner [63]. VIME [48] estimates mask vectors from corrupted tabular data and reconstructs feature vectors for self-supervised learning. 
They use the trained encoder to generate multiple augmented samples for each data point by masking each point using several different masks and then imputing the corrupted values for each masked data point. SubTab [46] finds that reconstructing the data from the subset of its features rather" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.171 + ], + "angle": 0, + "content": "than its corrupted version in an autoencoder setting can better capture its underlying latent representation. SEFS [221] reconstructs the original input based on a randomly selected subset of input features, and simultaneously estimates the gate vector that defines which features are selected or not. MET [223] uses a concatenation of representations for all features instead of averaging and uses adversarial reconstruction loss in addition to the standard loss." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.192, + 0.927, + 0.543 + ], + "angle": 0, + "content": "Contrastive Pre-training. Contrastive pre-training uses data augmentations to generate positive pairs or two different augmented views of a given example, and the loss function encourages a feature extractor to map positive pairs to similar features. The key factor in contrastive learning is to generate positive and negative versions of a given instance \\( x_{i} \\). [70] utilizes CutMix [258] in the input space and Mixup [259] in the embedding space to obtain positive pairs, where other instances \\( x_{j \\neq i} \\) are treated as negative ones. SCARF [47] generates a view for a given input by selecting a random subset of its features and replacing them with random draws from their respective empirical marginal distributions. STab [224] relies on two (or multiple) weight-sharing neural networks with different regularizations applied to a single input. 
By exploiting the stop-gradient operation technique, STab can model invariance with respect to more complicated regularizations while it will not collapse to an undesired trivial solution. DoRA [226] incorporates domain knowledge, training by intra-sample pretext task and inter-sample contrastive learning to learn contextualized representations. DACL+ [220], to overcome the reliance on a particular domain, uses Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.564, + 0.927, + 0.945 + ], + "angle": 0, + "content": "Hybrid Methods. [222] explores several pre-training strategies including both supervised and unsupervised ones. It considers MLM as the unsupervised pre-training objective, and sets multi-label classification as the supervised pre-training objective. By fine-tuning the PTM with several choices, including those with frozen feature extractor or not, the paper observes that supervised pre-training leads to more transferable features in the tabular domain. LFR [227] conducts pretraining by learning to simultaneously reconstruct multiple randomly generated projection functions. It considers diverse data types to show the wide-ranging applicability of learning from randomness, including tabular, vision, and language. ReConTab [225] utilizes both self-supervised learning and semi-supervised learning. It uses regularization techniques for raw feature selection and leverages contrastive learning with labels to distill the most pertinent information for downstream tasks. [71] focuses on the setup with fully labeled tabular datasets to understand if pretraining helps tabular deep learning in a fully supervised setting and compares pretraining methods to the strong supervised baselines. 
They show that using the object target labels during the pertaining stage is beneficial for the downstream performance and advocate several target-aware pretraining objectives. [256] provides a systematic review and summarizes the recent progress and challenges of self-supervised learning for non-sequential tabular data." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.412, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "15" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.063, + 0.488, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.217, + 0.495, + 0.292 + ], + "angle": 0, + "content": "Figure 6: Illustration of heterogeneous transferable tabular methods. During pre-training on one or multiple datasets, most of the parameters in the PTM are trained. For downstream tasks, only a small subset of parameters is fine-tuned while the rest remain fixed." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.303, + 0.434, + 0.318 + ], + "angle": 0, + "content": "6.2 Heterogeneous Transferable Tabular Model" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.326, + 0.492, + 0.4 + ], + "angle": 0, + "content": "The main intuition lies in the mapping \\( f \\) and \\( g \\) work in a similar fashion, i.e., predicting the labels with similar mechanisms. Therefore, the main idea to transfer knowledge is to match the target model with the well-trained one, over the weight space or the prediction space." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.4, + 0.493, + 0.663 + ], + "angle": 0, + "content": "Early methods mainly focus on the feature-level heterogeneity between \\( f \\) and \\( g \\). 
One main assumption is that there exists a shared set of features between the pre-trained task \\( \\mathcal{D}' \\) and the target task \\( \\mathcal{D} \\), then we may directly copy the weights corresponding to the shared features from the PTM. Some methods extend bias regularization to deal with heterogeneous feature spaces by padding the weights with zero. OPID [260] is a one-pass learning approach, which only needs to scan each instance once and to deal with evolving streams. In the pre-training stage, OPID compresses important information of vanished features into functions of survived features, and in the adaptation stage, it is expanded to include the augmented features. ReForm [261] learns the meta-representation for each feature and based on which calculates the relationship between features in the meta-representation space. ReForm then bridges the feature space gap through optimal transport, which could be further used to transform classifiers with different features and classes." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.664, + 0.493, + 0.797 + ], + "angle": 0, + "content": "A major advantage of neural models is that they are easily fine-tuned in new domains and learn reusable features. For example, as the deep PTM has the ability to extract generalizable features for a tabular task, reusing the knowledge from the PTM can utilize the strategies designed for visual and language domains. In detail, we can fix most of the parameters in the PTM and tune the remaining parts which only have limited parameters, for example, the linear probing or parameter-efficient fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.797, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Reuse PTM Pre-trained from One Dataset. These methods primarily focus on the difference between the pre-trained and down-streaming datasets. TabRet [72] utilizes masked autoencoding to make the transformer work in downstream tasks. 
To transfer pre-trained large language models to tabular tasks, ORCA [73] trains an embedder to align the source and target distributions. TabToken [64] focuses on improving the quality of the feature tokens, which are an important component in tabular deep models. TabToken leverages a conditional contrastive loss to improve the" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "quality of learned embeddings and demonstrates enhanced transferability of deep learning models for tabular data." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.083, + 0.923, + 0.346 + ], + "angle": 0, + "content": "Pseudo-Feature method [222] utilizes pseudo-feature models individually for each new feature. In detail, given one additional feature in a downstream dataset, it first pretrains a model on the upstream data without that feature. Then Pseudo-Feature fine-tunes the pre-trained model on downstream data to predict values in the column absent from the upstream data. Next, the fine-tuned model is used back in the upstream datasets to predict and assign pseudo-values of this feature. After supplementing the upstream dataset with the \"unseen\" feature in the downstream task, PseudoFeature pre-trains and transfers the feature extractor to the downstream task again. This method is computationally expensive in our broader feature space adaptation scenario. Reuse PTM Pre-trained from Multiple Datasets. XTab [230] aims to enhance the transferability of the transformer. They address the challenge of inconsistent column types and quantities among tables by utilizing independent features and federated learning to pre-train the shared component." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.346, + 0.924, + 0.651 + ], + "angle": 0, + "content": "Another thread of method learns shared components such as attribute-agnostic transformation across datasets, which provides a good model initialization for partial parameters given a downstream task. [228] infers latent representations of each attribute and each response from a few labeled instances using an inference network. The attribute and response representations are enabled make predictions based on the task-specific properties of attributes and responses even when attribute and response sizes are different across tasks. DEN [229] uses a three-block architecture: a covariate transformation block followed by a distribution embedding block and then a classification block. They provide theoretical insights to show that this architecture allows the embedding and classification blocks to be fixed after pre-training on a diverse set of tasks. Meta-Transformer [231] leverages a frozen encoder to perform multimodal perception without any paired multimodal training data. In Meta-Transformer, the raw input data from various modalities are mapped into a shared space in meta learning [262], allowing a subsequent encoder with frozen parameters to extract high-level semantic features." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.672, + 0.836, + 0.687 + ], + "angle": 0, + "content": "6.3 Reusing a Pre-trained Language Model" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.691, + 0.923, + 0.809 + ], + "angle": 0, + "content": "In some cases, the semantic meaning of features is available, making it natural to leverage pre-trained language models for tabular data. Typically, two types of semantic information can be derived from a tabular dataset \\(\\mathcal{D}\\). First, attribute names for each of the \\(d\\) features, \\(\\mathcal{A} = A_{1},\\ldots ,A_{d}\\), provide useful context. 
Additionally, meta-information such as a textual description, denoted as meta_description, can further enhance understanding. The learning process is then formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.817, + 0.924, + 0.834 + ], + "angle": 0, + "content": "\\[\n\\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}, \\mathcal {A} \\mid \\mathcal {D}, \\text {m e t a} _ {\\text {d e s c r i p t}}\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.841, + 0.925, + 0.885 + ], + "angle": 0, + "content": "where the semantic information bridges the gap between feature spaces and facilitates knowledge transfer from pretrained tasks to downstream applications." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.885, + 0.926, + 0.945 + ], + "angle": 0, + "content": "Although pre-trained language models have demonstrated success in various domains, their application to tabular data remains limited due to the prevalence of numerical values and the scarcity of textual descriptions." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "16" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.056, + 0.473, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.212, + 0.493, + 0.271 + ], + "angle": 0, + "content": "Figure 7: Illustration of transferable tabular methods with a language model. The language model can be applied at various stages, including feature tokenization, feature engineering, and textual serialization." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.282, + 0.492, + 0.34 + ], + "angle": 0, + "content": "Moreover, concerns about data privacy and security may further restrict access to semantic information. 
Consequently, language models are typically applied to tabular datasets only when textual context is sufficiently available." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.342, + 0.493, + 0.517 + ], + "angle": 0, + "content": "Language Models for Feature Tokenization. When the feature space changes, language-based methods assume that semantic relationships exist between feature descriptions and rely on large-scale language models to capture these connections. For example, the feature \"occupation\" in one task may share semantic similarity with the feature \"organization\" in another, allowing feature-label relationships to be reused across different datasets. By extracting feature embeddings (tokens), tables of varying sizes can be transformed into a standardized set of tokens in a shared space. A pre-trained transformer then encodes transferable knowledge, aiding the fine-tuning process for downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.518, + 0.493, + 0.751 + ], + "angle": 0, + "content": "TransTab [77] trains a tokenizer based on the words present in tabular data and incorporates both column descriptions and table cells as raw input to a gated transformer model. The model is pre-trained via self-supervised learning or supervised contrastive loss and is validated on tasks such as transfer learning and feature incremental learning. PTab [232] adopts a similar approach, learning contextual representations from multiple tokenized tabular datasets before fine-tuning for downstream tasks. UniTabE [182] encodes and fuses information from column names, data types, and cell values into a set of tokens, applying an encoder-decoder architecture with Transformer and LSTM components. It is pre-trained using Multi-Cell-Masking and contrastive learning, where a sub-vector of an instance is treated as a positive sample while other instances or their subsets are considered negatives." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.752, + 0.493, + 0.927 + ], + "angle": 0, + "content": "CM2 [79] introduces a cross-table pre-training framework that integrates attribute names and feature values. CM2 uses transformers to process feature tokens and employs a prompt-based Masked Table Modeling (pMTM) self-supervised objective, where column names act as prompts to assist in predicting masked features. TP-BERTa [78] follows a similar approach but incorporates numerical discretization strategies and magnitude tokenization for feature encoding, fine-tuning smaller pre-trained language models such as RoBERTa [263] for tabular data prediction. Its pre-training objective includes supervised loss and magnitude-aware triplet loss as a regularizer." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.928, + 0.492, + 0.943 + ], + "angle": 0, + "content": "CARTE [233] utilizes a graph representation of tabular" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.199 + ], + "angle": 0, + "content": "data to handle heterogeneous feature spaces, transforming textual information from column names and entries into embeddings. A graph-attentional network is then applied to contextualize entries with column names and neighboring entries. CARTE is pre-trained on the YAGO3 knowledge base [264] by constructing graphlets for tabular data and employing contrastive loss, where the original graphlet and one truncated variant are positives, while other graphlets in the batch serve as negatives. The pre-trained CARTE model is subsequently fine-tuned for downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.199, + 0.927, + 0.446 + ], + "angle": 0, + "content": "Language Models for Feature Engineering. Discriminative features enhance the effectiveness of subsequent tabular learning models. 
Binder [234] identifies task input components that are not directly answerable by a model and leverages LLMs to generate auxiliary features, particularly for knowledge grounding tasks. Given that discriminative features are often manually designed, CAAFE [265] explores the use of LLMs to generate auxiliary features based on task and feature semantics. The quality of these features is then evaluated using a general tabular model, TabPFN [89]. FeatLLM [266] enhances feature generation by incorporating example-based prompting, enabling LLMs to create new features based on textual descriptions. TaPTaP [235] is expected to capture a generic tabular data distribution after ongoing pre-training on a large-scale corpus of real-world tabular data, generating high-quality synthetic tables to support various applications on tabular data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.447, + 0.926, + 0.679 + ], + "angle": 0, + "content": "Language Models for Textual Serialization. A direct approach to incorporating pre-trained language models involves converting tabular data into a textual format, allowing LLMs to infer relationships between features and labels based on embedded expert knowledge. This concept has been validated in semantic parsing tasks [267], [268]. LIFT [236] and TabLLM [80] serialize tabular data by integrating feature names into text and combining them with task descriptions. This enables LLMs to treat tabular prediction tasks as text generation problems. LIFT fine-tunes models on the entire training set, while TabLLM employs few-shot learning for fine-tuning. UniPredict [237] constructs prompts using metadata, sample serialization, and task instructions, fine-tuning LLMs with confidence-weighted augmented labels predicted by an external model. The approach is validated on multiple in-distribution datasets." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.679, + 0.925, + 0.797 + ], + "angle": 0, + "content": "Despite their advantages, textual serialization methods face challenges when the number of features increases, as prompts may become too large to fit within the model's context window. The effectiveness of LLMs in tabular data tasks remains constrained by the availability of semantic information and the capabilities of external tabular models. Further exploration of LLM-based methods will be discussed in the general tabular models in Section 7." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.808, + 0.81, + 0.823 + ], + "angle": 0, + "content": "6.4 Reusing a Pre-trained Vision Model" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.826, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Given the success of deep neural networks (DNNs) in visual tasks, it is intuitive to leverage the strong recognition capabilities of pre-trained vision models for tabular data. Additionally, data augmentation strategies commonly used in image processing can be introduced after transforming tabular data into a visual format. Similar ideas have been explored in time series forecasting [269] and irregular time series classification [270]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.043 + ], + "angle": 0, + "content": "17" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.058, + 0.487, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.194, + 0.493, + 0.253 + ], + "angle": 0, + "content": "Figure 8: Illustration of transferable tabular methods with a vision model. Tabular data can be transformed into images through dimensionality reduction, table reorganization, and the use of image markers." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.267, + 0.493, + 0.428 + ], + "angle": 0, + "content": "The primary challenge lies in representing tabular instances in an image-compatible format. In natural images, neighboring pixels often share semantic relationships, whereas tabular data lacks inherent spatial structure. Features in a tabular instance are permutation-invariant, meaning that exchanging their order does not alter the instance's meaning. Various methods have been proposed to transform tabular data into visual representations, enabling the application of pre-trained vision models fine-tuned for tabular tasks. This subsection highlights different transformation strategies that transfer tabular datasets into images." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.429, + 0.493, + 0.59 + ], + "angle": 0, + "content": "Dimensionality Reduction Transformation. Visualization strategies for tabular data naturally convert tables into images by embedding high-dimensional features into a lower-dimensional space. DeepInsight [238] projects tabular data into a 2D space using t-SNE and constructs images through convex hull analysis, applying translation, rotation, quantization, and normalization. REFINED [239] employs Bayesian Metric Multidimensional Scaling to preserve pairwise distances within the low-dimensional representation, ensuring that structurally similar features remain proximate in the transformed image." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.591, + 0.493, + 0.839 + ], + "angle": 0, + "content": "Table Reorganization Transformation. A tabular dataset \\(\\mathcal{D}\\) can be treated as a matrix and represented as a single-channel image or kernel. To enable visual PTMs to recognize meaningful spatial relationships, different strategies have been developed for structuring tabular data into images. Tabular Convolution (TAC) [240] arranges data samples into zero-mean square matrices (kernels) of odd integer dimensions. 
These kernels are then convolved with a fixed \"base image,\" and the resulting images are subsequently fed to a CNN for classification. Image Generator for Tabular Data (IGTD) [74] and TabEye [75] share a similar idea, generating an image for each data sample where pixel intensities correspond directly to feature values. These methods prioritize placing similar features in close proximity but struggle with high-dimensional tabular tasks. LM-IGTD [241] extends IGTD by incorporating stochastic feature generation to enhance robustness and generalization." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.841, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Image Marker Transformation. Another approach involves encoding feature values as visual markers within an image. Super-TML [242] assigns feature values to predetermined positions within an image, effectively handling categorical and numerical datasets. Tab2Visual [76] normalizes tabular data and represents each instance as a row of multiple bars, each corresponding to a specific value. Each feature" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.083 + ], + "angle": 0, + "content": "is assigned a unique color to enhance visual differentiation, while bar widths are proportional to feature magnitudes." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.083, + 0.923, + 0.158 + ], + "angle": 0, + "content": "By transforming tabular data into images, these methods enable the application of powerful pre-trained vision models to tabular prediction tasks, leveraging established deep learning techniques from the vision domain to enhance tabular model performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.178, + 0.898, + 0.194 + ], + "angle": 0, + "content": "7 FROM TRANSFERABLE TO GENERAL MODEL" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.198, + 0.925, + 0.33 + ], + "angle": 0, + "content": "The general model (also referred to as the tabular foundation model) represents an advancement over the transferable model. It extends the generalization capabilities of a pretrained tabular model to a variety of heterogeneous downstream tabular tasks, regardless of their diverse feature and class spaces, without requiring additional fine-tuning. In other words, given a pre-trained model \\( g_{\\Theta} \\), it can be directly applied to a downstream tabular task \\( \\mathcal{D} \\) to predict the label of a test instance \\( x^{*} \\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.649, + 0.339, + 0.924, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\hat {y} ^ {*} = g _ {\\Theta} \\left(\\boldsymbol {x} ^ {*} \\mid \\mathcal {D}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.363, + 0.925, + 0.524 + ], + "angle": 0, + "content": "Thus, the general model shares similarities with the transferable tabular model, but with a greater emphasis on the \"zero-shot\" ability, aims to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. Importantly, it does not require an Adapt function, which further reduces the computational cost of hyper-parameter tuning. The goal of the general tabular model is to achieve better generalization on downstream tabular datasets \\(\\mathcal{D}\\) when compared to alternative strategies, such as training a tabular model directly on \\(\\mathcal{D}\\) or adapting a transferable model." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.527, + 0.925, + 0.732 + ], + "angle": 0, + "content": "Remark 6. 
Distinguishing between an advanced transferable tabular model, pre-trained on a wide range of heterogeneous tabular tasks, and the general tabular model can be challenging. Some transferable tabular models, based on auxiliary feature semantics, are able to predict labels for downstream test instances directly [80]. However, their prediction ability is constrained and typically applicable only in specific areas after fine-tuning [78], [233]. The general tabular model, on the other hand, is designed to handle a wider range of heterogeneous tabular tasks, sharing similar pre-training challenges with transferable models but without utilizing additional semantics. Fine-tuning a pre-trained general model is also an option for further performance improvements [93], [96]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.738, + 0.927, + 0.945 + ], + "angle": 0, + "content": "Pre-training has revolutionized domains such as vision and language [271], [84], but its adoption in tabular data remains limited due to the inherent heterogeneity of tabular datasets. Tabular datasets can vary significantly in both dimensionality (i.e., the number of columns) and the semantic meaning of each dimension, even within the same application. For example, different healthcare datasets may capture varying levels of detail and aspects of patient information. Even within the same feature entry (e.g., the \\(d\\)-th column), the meaning can vary (e.g., \"age\" vs. \"height\"). This contrasts with vision and text data (within the same language), where different data sources typically share the same \"vocabulary\" (e.g., pixels, patches, or sub-words) and similar relationships between vocabulary \"elements\" (e.g., neighboring pixels" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "18" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.055, + 0.28, + 0.162 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.056, + 0.492, + 0.162 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.171, + 0.493, + 0.248 + ], + "angle": 0, + "content": "Figure 9: Illustration of general methods. These methods handle inherent heterogeneity by improving the model's adaptability or homogenizing the diverse tabular formats. Once pre-trained, they can be directly applied to downstream tasks without fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.253, + 0.492, + 0.312 + ], + "angle": 0, + "content": "often share colors). The lack of shared vocabulary and relationships in tabular data makes it challenging to jointly train a model across multiple datasets, let alone apply a pre-trained model directly to new downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.312, + 0.492, + 0.459 + ], + "angle": 0, + "content": "There are two main strategies to address the inherent heterogeneity in tabular datasets: improving the model's adaptability or homogenizing the diverse tabular formats. We categorize general tabular models into three parts based on their strategies for achieving generalizability. The first focuses on raw-feature-based approaches, among which TabPFN variants represent a rapidly evolving branch and are thus discussed separately. The third category encompasses semantic-based methods that leverage attribute and task semantics to unify heterogeneous tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.478, + 0.373, + 0.492 + ], + "angle": 0, + "content": "7.1 Raw-Feature-based General Models" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.496, + 0.491, + 0.642 + ], + "angle": 0, + "content": "To adapt a general tabular model to heterogeneous tabular datasets during the pre-training and fine-tuning stages, two main strategies can be used from the data-centric and model-centric perspectives. From the data-centric perspective, the general model may standardize tabular datasets into a homogeneous form. For instance, TabPTM [86] transforms all datasets into a uniform format using meta-representation to enable pre-training. The pre-trained model can then be applied directly to a downstream dataset or fine-tuned without introducing additional parameters." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.643, + 0.493, + 0.804 + ], + "angle": 0, + "content": "Alternatively, from the model-centric perspective, the general model may improve adaptability by tailoring it to specific tabular tasks. HyperFast [87] adopts the concept of a Hyper Network [272] in meta-learning [273], where a mapping from the tabular dataset to the weights of a classifier is learned. This mapping can then be used to predict labels for unseen instances from the task. To address datasets with varying dimensions, HyperFast projects datasets into a fixed size using random projections. To overcome the slow weight generation speed, MotherNet accelerates HyperFast by modifying its architecture with Transformer-like modules [88]." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.822, + 0.238, + 0.835 + ], + "angle": 0, + "content": "7.2 TabPFN Variants" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.84, + 0.493, + 0.945 + ], + "angle": 0, + "content": "The TabPFN family of models [89], [91] leverages the incontext learning capabilities of transformers, directly predicting labels by adapting test instances according to the context of training examples. In the first version of TabPFN, an instance \\( \\boldsymbol{x}_i \\) is padded to a fixed dimension (e.g., 100), and the features are projected to a higher dimension (e.g., \\( d' \\)) for further processing. The label \\( y_i \\) is processed similarly and" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.242 + ], + "angle": 0, + "content": "added to the instance embeddings. The embeddings of all \\( N + 1 \\) instances, including training and test instances, are formulated into a set of \\( N + 1 \\) tokens with \\( d' \\) dimensions. These tokens are processed through several layers of a Transformer, and the output token corresponding to the test instance is further predicted using a 10-way classifier. TabPFN is pretrained over synthetically generated datasets with structured causal models (SCM) [274] and Bayesian Neural Networks (BNNs) [275], [276], enabling the strong in-context learning ability, with the best checkpoint selected based on some real-world datasets. Due to the high complexity of transformers, TabPFN is limited to small-scale tasks, with suggested sizes of \\( N < 1000 \\), \\( d < 100 \\), and \\( C < 10 \\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.243, + 0.927, + 0.563 + ], + "angle": 0, + "content": "TabPFN v2 introduces a specialized feature tokenizer to better handle heterogeneity. Specifically, each cell in the table is projected to a \\(k\\)-dimensional vector using a shared mapping, and random position encoding vectors are added to differentiate features [277]. 
This results in a tensor of size \\((N + 1) \\times (d + 1) \\times k\\) when there is a single test instance. The label of each instance is processed similarly, and the mapped \\(k\\)-dimensional token is concatenated with the instance tokens. A dummy label (e.g., the average of all labels) is used for the test instance since its label is unknown. A two-way attention mechanism is used, with each feature attending to the other features in its row and then attending to the same feature across its column [278]. The output token corresponding to the label of the test instance is further mapped to a 10-class classifier or regressor. Several improvements have been made in TabPFN v2, including increased context size (\\(N < 10000\\), \\(d < 500\\)), automatic feature engineering, and post-hoc ensemble methods. [279] analyzes TabPFN from a bias-variance perspective, shedding light on its generalization capabilities. Various applications have also been explored, including tabular data generation [280], anomaly detection [281], data augmentation [282], and time series forecasting [283]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.564, + 0.925, + 0.593 + ], + "angle": 0, + "content": "The improvements of TabPFN (especially TabPFN v1) stem from several aspects." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.593, + 0.925, + 0.886 + ], + "angle": 0, + "content": "Pre-training Improvements. TabForestPFN [284] extends TabPFN by pre-training In-Context Learning (ICL)-transformers on a new forest dataset generator that creates unrealistic datasets with complex decision boundaries. TabDPT [179] pre-trains the architecture on real-world datasets using self-supervised learning and retrieval objectives, making it suitable for both classification and regression tasks. 
APT [285] is pre-trained utilizing adversarial synthetic data generated by adaptive agents, which systematically modify the underlying data-generating distribution and deliberately challenge the model with diverse synthetic datasets to enhance its robustness and generalization capabilities. TabICL [286] integrates tree-based SCMs using XGBoost [130] to model complex interactions and employs curriculum learning by progressively increasing synthetic dataset sizes. Scalable Improvements. The efficiency of TabPFN is highly sensitive to context size, prompting strategies to enhance scalability and performance [39]. These include compressing training data into a compact learned representation using sketching [287] or prompt tuning techniques [288], [289]," + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.895, + 0.925, + 0.942 + ], + "angle": 0, + "content": "1. Some variants of TabPFN are not considered general tabular models, especially the latter parts, as they require additional fine-tuning steps. We place them in this subsection due to their strong relationship with TabPFN." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "19" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.127 + ], + "angle": 0, + "content": "employing adaptive data selection methods to identify the most pertinent training examples for each test instance [290], [90], [179], [291], and replacing traditional quadratic attention with computationally efficient linear attention mechanisms [292] and state-space models (SSMs) [293]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.128, + 0.493, + 0.288 + ], + "angle": 0, + "content": "Adaptation Improvements. 
Some approaches improve TabPFN's performance on downstream tasks by adapting the context [90] or fine-tuning specific parts of the model [96], [284], [290], [289]. TabICL [286] employs a column-then-row attention mechanism to construct fixed-dimensional embeddings of rows, which are subsequently processed by a transformer like TabPFN v1 to facilitate efficient in-context learning. EquiTabPFN [294] introduces self-attention across target components, ensuring that the arbitrary ordering of target dimensions does not influence model predictions, enhancing the performance of TabPFN v1 to some extent." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.309, + 0.357, + 0.323 + ], + "angle": 0, + "content": "7.3 Semantics-based General Models" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.329, + 0.495, + 0.665 + ], + "angle": 0, + "content": "By leveraging the semantic structure of tabular data, such as column names, heterogeneous tasks can be projected into a shared language space. This allows a single language model, pre-trained on diverse tabular datasets, to handle unseen tasks in a unified manner. TabuLa-8B [92] fine-tunes a Llama 3-8B LLM for tabular data prediction (classification and binned regression) using a novel packing and attention scheme for tabular prediction. GTL [93] transforms tabular datasets into an instruction-oriented language format, facilitating the continued pre-training of LLMs on instruction-oriented tabular data, which demonstrates strong performance in few-shot scenarios. GTL-S [295] unlocks the potential of GTL from a scaling perspective, revealing that scaling datasets and prediction tasks enhance generalization. [94] extends GTL by incorporating retrieval-augmented LLMs for tabular data, combined with retrieval-guided instruction-tuning for LLMs. MediTab [243] uses a data engine that leverages LLMs to consolidate tabular samples to overcome the barrier across tables with distinct schema. 
MediTab aligns out-domain data with the target task using a \"learn, annotate, and refinement\" pipeline, enabling the pre-trained model to infer for arbitrary tabular input in the domain without fine-tuning." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.687, + 0.367, + 0.703 + ], + "angle": 0, + "content": "8 TABULAR ENSEMBLE METHODS" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.709, + 0.493, + 0.812 + ], + "angle": 0, + "content": "Ensemble learning is a natural way to improve the generalization ability of multiple base learners by leveraging their diversity. Classical methods such as Random Forest [127] and AdaBoost [126], [296] employ bagging and boosting, respectively, by ensembling multiple decision trees. These methods have proven effective for tabular data, as they reduce bias/variance and improve robustness [297]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.812, + 0.493, + 0.945 + ], + "angle": 0, + "content": "In deep tabular learning, ensemble methods can be categorized into two primary approaches: joint-training ensembles, where multiple sub-networks are aggregated within a single training pipeline, and post-hoc ensembles, where the predictions from multiple pre-trained deep tabular models are fused. One major challenge in ensembling deep tabular methods is computational efficiency, as training multiple deep models or sub-models can be computationally expensive and time-consuming." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.054, + 0.738, + 0.069 + ], + "angle": 0, + "content": "8.1 Joint-Training Ensembles" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.075, + 0.925, + 0.192 + ], + "angle": 0, + "content": "Joint-training ensemble methods integrate diverse model architectures within a single training process to improve predictive performance while maintaining efficiency. 
These architectures often combine different types of models, such as linear and non-linear models [28] or tree-based and deep neural network-based approaches [63]. Tree-mimic methods leverage this concept by mixing predictions from multiple tree nodes to enhance robustness [60], [59], [193]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.193, + 0.927, + 0.382 + ], + "angle": 0, + "content": "To improve efficiency while maintaining predictive power, various techniques have been explored. Some approaches employ parameter-efficient ensembles, such as TabM [176], which uses MLPs as base learners and incorporates BatchEnsemble [298] to generate multiple diverse base learners efficiently. This prevents a large increase in the number of learnable parameters while maintaining model diversity. Similarly, BETA leverages pre-trained TabPFN by generating multiple base learners through additional parameter tuning [96]. Specifically, BETA learns multiple feature projections, feeding the projected training sets into TabPFN and aggregating the results while applying BatchEnsemble to reduce the number of additional learnable parameters." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.383, + 0.927, + 0.502 + ], + "angle": 0, + "content": "Some hybrid approaches, such as LLM-Boost and PFN-Boost, have been developed to integrate large language models and TabPFN with gradient-boosted decision trees [299]. In these approaches, LLMs and PFN serve as the initial base learners, and additional base learners are sequentially trained in a boosting manner. This approach leverages the strong prior knowledge from LLMs and TabPFN while maintaining the scalability of gradient-boosted decision trees." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.526, + 0.704, + 0.54 + ], + "angle": 0, + "content": "8.2 Post-Hoc Ensembles" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.548, + 0.927, + 0.722 + ], + "angle": 0, + "content": "Post-hoc ensemble (PHE) methods involve combining multiple trained models to improve robustness and accuracy. Bagging-based ensembles are one of the most direct post-hoc strategies, where usually multiple models trained with different random seeds are aggregated [33], [69]. Although this approach improves model robustness, it incurs high computational overhead. Some recent studies have demonstrated that LLM-based methods exhibit diverse prediction behaviors compared to deep tabular models that do not utilize attribute names [94]. This difference in prediction styles enhances their complementarity, making them ideal candidates for ensemble methods." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.724, + 0.927, + 0.943 + ], + "angle": 0, + "content": "Instead of explicitly training multiple models, perturbation-based approaches create diverse predictions from the same pre-trained model. One such method applies feature permutation with TabPFN, leveraging the fact that TabPFN is not fully feature permutation-invariant [89]. A perturbation-based ensemble can be formed by randomly permuting the feature order in both the training and test sets and making predictions multiple times, generating multiple diverse predictors without additional training costs. TabPFN v2 introduces additional perturbations to enhance diversity among several key factors, including variations in feature encoding, feature quantization, categorical feature shuffling, SVD-based feature compression, outlier removal, and power transformations such as the Yeo-Johnson transformation [91]. 
These randomly selected transformations create diverse" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "20" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "prediction patterns, enabling effective ensemble learning without requiring multiple separately trained models." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.084, + 0.493, + 0.304 + ], + "angle": 0, + "content": "Another post-hoc ensemble strategy employed in TabPFN v2 is the use of Portfolio-Based Ensemble, where a fixed set of TabPFN configurations is used [91]. A greedy ensemble selection technique is then applied to learn optimal weights for aggregating the predictions of different configurations [300]. By combining multiple perturbed models, this method improves generalization without excessive training costs. Some methods apply ensemble techniques to TabPFN v1 to handle large datasets. For instance, TabPFN-Bagging [96], [301] divides large datasets into multiple context groups, with the final results averaged to mitigate variance. BoostPFN [301] treats TabPFN v1 as weak learners, where each weak learner uses a subset of the training data as context. This approach allows BoostPFN to outperform standard Prior Fitted Networks (PFNs) on large datasets." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.332, + 0.212, + 0.348 + ], + "angle": 0, + "content": "9 EXTENSIONS" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.357, + 0.49, + 0.386 + ], + "angle": 0, + "content": "In this section, we briefly introduce some extensions on deep tabular methods across different complex tasks." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.387, + 0.493, + 0.591 + ], + "angle": 0, + "content": "Clustering. 
Traditional clustering approaches often leverage enhanced distance metrics, such as the Gower distance [302], which is specifically designed for mixed data types, and interpretable prototypes, such as K-medoids. Recent advances in tabular data clustering have sought to integrate interpretability constraints with deep representation learning. For example, IDC [97] introduces a deep learning framework for general tabular data that predicts interpretable cluster assignments at both the instance and cluster levels. To address overlapping clusters, TableDC [98] integrates the Mahalanobis distance, which accounts for variance and correlation within the data. This method provides a similarity measure suitable for tables, rows, or columns in high-dimensional latent spaces." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.592, + 0.493, + 0.811 + ], + "angle": 0, + "content": "Anomaly Detection. Anomaly detection in tabular data is crucial for identifying subtle irregularities in structured datasets, such as fraudulent transactions or equipment failures. While classical techniques like Isolation Forest [303] and Local Outlier Factor [304] remain foundational, recent developments have incorporated various methods to capture contextual relationships in high-dimensional data. For instance, [305] introduces a method that learns mappings that maximize mutual information between each sample and the part that is masked out, capturing the structural nuances of samples from a single training class. ADBench [99] provides a comprehensive tabular anomaly detection benchmark with 30 algorithms and 57 benchmark datasets. Additionally, large language models (LLMs) have also been employed for anomaly detection in tabular data [306]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.812, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Tabular Generation. 
Tabular data generation has become an essential tool for synthetic data creation, privacy preservation, and addressing data scarcity. Traditional methods, such as Bayesian networks or GANs, focus on mimicking marginal distributions, while recent advancements emphasize preserving complex feature dependencies and semantic consistency. For instance, tabular diffusion models [307] iteratively refine synthetic data to capture subtle correlations in high-dimensional datasets, outperforming GANs in terms of data" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.17 + ], + "angle": 0, + "content": "fidelity. [308] introduces high-order structural causal information as a natural prior knowledge and offers a benchmark framework for evaluating tabular synthesis models. Despite these advances, challenges remain in balancing realism with privacy, such as avoiding identity leakage in sensitive datasets, and scaling to heterogeneous data types. Hybrid neuro-symbolic models [309] bridge this gap to provide trustworthy synthetic data for downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.171, + 0.923, + 0.389 + ], + "angle": 0, + "content": "Interpretability. Traditional gradient-boosted decision trees (GBDTs) inherently provide interpretability through feature importance scores and decision path visualization. Frameworks such as XGBoost [130] and LightGBM [131] quantify feature importance using metrics like split frequency and information gain. SHAP values [310] enable instance-level explanations by decomposing model predictions into feature contributions. The additive nature of GBDTs allows for partial dependence plots [311] to visualize feature effects while controlling for interactions. NeC4.5 [253], a novel decision tree algorithm that integrates the comprehensibility of decision trees with the generalization ability of neural network ensembles. 
By training a neural network ensemble to generate a new training set, NeC4.5 enhances decision tree performance while maintaining interpretability." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.389, + 0.923, + 0.68 + ], + "angle": 0, + "content": "Recent deep models specifically designed for tabular data have introduced novel interpretability mechanisms. For example, NAMs [255] combine some of the expressivity of DNNs with the inherent intelligibility of generalized additive models. They learn a linear combination of neural networks that each attend to a single input feature, which are trained jointly and can learn arbitrarily complex relationships between their input feature and the output. TabNet [105] uses sequential attention with learnable feature masks, where each decision step explicitly selects a subset of features via sparse masking. The aggregated feature usage across steps provides global interpretability comparable to GBDT's feature importance. Subsequent variants, such as TabTransformer [63], enhance interpretability by visualizing cross-feature attention patterns. FT-Transformer [33] combines feature tokenization with explainable attention, while NODE [60], NODE-GAM [61] and DOFEN [312] generalize ensembles of oblivious decision trees, benefiting from both end-to-end gradient-based optimization and multi-layer hierarchical representation learning." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.681, + 0.923, + 0.898 + ], + "angle": 0, + "content": "Open-Environment Tabular Machine Learning. Research on distribution shifts in tabular data starts with domain-to-domain shifts [110], which are commonly categorized based on the availability of target domain data. When target data is available, transfer learning techniques such as unsupervised domain adaptation [313] and test-time adaptation [314] are widely used. 
These methods adapt model parameters using test-time inputs but rely on access to target distributions, which may not always be feasible. In contrast, when target data is unavailable, a more practical but challenging scenario, methods aiming to enhance robustness and generalization, using approaches such as domain generalization [315], domain robustness [316], [317], label robustness [318] or ensemble strategies [95]. TableShift [110] provides a detailed analysis of this scenario." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.924, + 0.943 + ], + "angle": 0, + "content": "Beyond domain-to-domain shifts, temporal shifts are more general and complex. TabReD [109] emphasizes the inherent temporality of real-world tabular data, advocating" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.415, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "21" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.273 + ], + "angle": 0, + "content": "for temporal splits for training and testing. [319] further propose a refined training protocol focusing on temporal evaluation, significantly improving generalization across models. To address temporal shifts, it's critical to incorporate temporal information [319]. Drift-Resilient TabPFN [174] models temporal shifts with a secondary SCM, which specifies changes in the primary model parameters. [319] introduce a plug-and-play temporal embedding that effectively captures trend and periodicity patterns, providing an adaptive mechanism to mitigate the impact of temporal shifts. Under temporal shift conditions, most methods experience performance degradation, while TabM [95] exhibits relative robustness [109]. 
However, [319] demonstrate that with the refined training protocol and temporal embedding, methods such as ModernNCA [35] can regain competitiveness." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.282, + 0.493, + 0.516 + ], + "angle": 0, + "content": "Multi-modal Learning with Tabular Data. Text, such as feature names, can be effectively utilized to enhance tabular data learning, as discussed in Section 6. Here, we focus on interactions with the image modality, e.g., in healthcare, where medical images require specialized equipment and expert knowledge, often in tabular form, for accurate diagnosis [320]. To tackle challenges like large medical datasets and high annotation costs, MMCL [106] uses a contrastive self-supervised learning framework that integrates images and tabular data. CHARMS [107] transfers expert knowledge from tabular data to images, improving image predictions even without tabular data during inference, thus reducing reliance on costly expert annotations. TIP [321] proposes a self-supervised learning strategy with a tabular encoder for incomplete, heterogeneous data and a multimodal interaction module for inter-modality representation learning." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.524, + 0.493, + 0.744 + ], + "angle": 0, + "content": "Tabular Understanding. Tabular understanding involves comprehending the information contained within a table and can be broken down into several tasks. For example, Table Detection (TD) [322], [323] refers to identifying the region of the image that contains the table while Table Structure Recognition (TSR) [324], [325] involves the identification of the rows and columns to identify individual table cells, which aims to recognize the cellular structures of tables from table images by extracting the coordinates of cell boxes and row/column spanning information. Table Question Answering (TQA) [326], [327], [112] refers to providing precise answers from tables to answer a user's question. 
Traditional methods, whether OCR-based [328], [329], [330] or OCR-free [331], [332], [333], [334], [335], have made significant strides in TSR and TD, which are relatively simpler tasks." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.753, + 0.493, + 0.943 + ], + "angle": 0, + "content": "More complex tasks, such as TQA, have also been the focus of considerable effort. For example, Donut [332] proposes a novel task and a synthetic document image generator to pre-train the model, reducing reliance on large-scale real document images. Monkey and TextMonkey [336], [337] utilize shifted window attention and use similarity measures to filter out redundant tokens. mPLUG-DocOwl [338] adapts mPLUG-Owl for OCR-free document understanding, while TabPedia [335] constructs low- and high-resolution vision encoders with a concept synergy mechanism for visual table understanding. [339] focuses on exploring various table representations and directly prompting LLMs to improve performance. Please refer to [112], [113] for more details." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.052, + 0.664, + 0.068 + ], + "angle": 0, + "content": "10 DISCUSSIONS" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.079, + 0.925, + 0.137 + ], + "angle": 0, + "content": "In this section, we discuss several possible future directions for tabular machine learning, particularly in light of the significant potential demonstrated by tabular general/foundation models." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.138, + 0.925, + 0.342 + ], + "angle": 0, + "content": "The Ability to Handle Dynamic and Open Environments. Tabular models, particularly foundation models, will increasingly need to operate in dynamic, real-world environments where data evolves over time [340]. One of the key challenges is dealing with imbalanced datasets [155], where certain classes may be underrepresented, and the distribution of data may shift over time [110]. 
As a result, models need to adapt to these changes and continue providing accurate predictions. Additionally, the emergence of new classes in the data may require the model to evolve and update its predictions in real-time [341]. This calls for methods that ensure tabular foundation models can accommodate evolving data, handling both new classes and changing distributions effectively." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.343, + 0.925, + 0.635 + ], + "angle": 0, + "content": "The Coverage and Scope of Tabular Foundation Models. Current tabular foundation models have demonstrated strong performance on various unseen classification and regression tasks. However, several important questions remain about their capabilities. For instance, in addition to in-context learning [246], are there other prediction strategies that could be employed to further enhance the versatility and performance of tabular foundation models? Beyond classification and regression, can these models be extended to handle related tasks such as clustering, imputation, outlier detection, or even table-based question answering (QA)? Expanding the task scope could increase the model's utility in a wide range of applications. Furthermore, it is worth investigating whether there is a scaling law [342] for tabular foundation models. Currently, tabular checkpoints are relatively small compared to foundation models in other modalities, such as language or vision. Understanding the implications of scaling these models—particularly the trade-offs between model size and performance—will be crucial for their future development." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.636, + 0.925, + 0.811 + ], + "angle": 0, + "content": "Will Foundation Models Always Help? While foundation models have demonstrated impressive generalization abilities, there are inherent trade-offs. 
Similar to ensemble learning, a single foundation model may provide an \"average\" predictive ability across tasks, potentially losing specialized expertise for specific tasks. To address this, a promising approach could be the development of a \"tabular model zoo\" [343], [344]. In this paradigm, different pre-trained models, potentially including models from other domains, could be combined for a specific tabular task. Given a new task, suitable pre-trained models could be selected, adapted if necessary, and integrated for improved performance." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.812, + 0.927, + 0.943 + ], + "angle": 0, + "content": "Model Efficiency. In many real-world applications, tabular datasets are large and high-dimensional, posing significant challenges for both training and inference [345], [44]. One area of concern is how to handle extreme cases, such as when the data is exceptionally large or sparse. Foundation models should be able to scale effectively in these scenarios without sacrificing performance. Another issue is inference speed. In large-scale problems, timely predictions are essential, especially when deployed in real-time environments [292]. Opti-" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "22" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.17 + ], + "angle": 0, + "content": "mizing the inference process is therefore critical to ensure that predictions can be made quickly on large, complex datasets. Lastly, the computational resources required for training and deploying foundation models can be substantial [346]. 
Optimizing resource usage through methods such as model pruning, quantization, and efficient training algorithms will be important to ensure that these models remain practical and accessible for a wide range of applications." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.171, + 0.493, + 0.36 + ], + "angle": 0, + "content": "Bridging the Gap Between Tabular Data and Other Modalities. Tabular data often coexists with other data modalities, such as images and text. One of the exciting challenges in the field is how to effectively integrate tabular data with foundation models from other domains [347]. Combining the strengths of tabular models with those of vision or language models could result in more powerful and versatile models capable of handling multimodal data. Exploring how to seamlessly integrate these modalities—whether through joint embeddings, cross-modal attention mechanisms, or other techniques—could unlock significant advances in tasks that require both structured tabular data and unstructured data sources like images or text." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.377, + 0.227, + 0.391 + ], + "angle": 0, + "content": "11 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.396, + 0.495, + 0.631 + ], + "angle": 0, + "content": "Tabular data remains a cornerstone of real-world machine learning applications, and the advancement of deep learning has opened new possibilities for effective representation learning in this domain. In this survey, we present a comprehensive overview of deep tabular representation learning, covering its background, challenges, evaluation benchmarks, and the discussion between tree-based models and DNNs. We systematically categorize existing methods into three categories—specialized, transferable, and general models—based on their generalization capabilities. 
In addition, we discuss ensemble techniques, extensions, and some promising future directions, such as open-environment and multimodal tabular learning. We hope this survey serves as a valuable reference for understanding the current state of the field and inspires further progress in developing more robust and generalizable tabular learning methods." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.647, + 0.188, + 0.661 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.075, + 0.667, + 0.493, + 0.701 + ], + "angle": 0, + "content": "[1] B. Kovalerchuk and E. Vityaev, Data mining in finance: advances in relational and hybrid methods. Springer Science & Business Media, 2005. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.702, + 0.493, + 0.748 + ], + "angle": 0, + "content": "[2] S. L. Hyland, M. Faltys, M. Hüser, X. Lyu, T. Gumbsch, C. Esteban, C. Bock, M. Horn, M. Moor, B. Rieck et al., \"Early prediction of circulatory failure in the intensive care unit using machine learning,\" Nature medicine, vol. 26, no. 3, pp. 364-373, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.748, + 0.493, + 0.783 + ], + "angle": 0, + "content": "[3] C. Romero and S. Ventura, \"Educational data mining: a review of the state of the art,\" IEEE Transactions on Systems, Man, and Cybernetics, vol. 40, no. 6, pp. 601-618, 2010. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.783, + 0.493, + 0.817 + ], + "angle": 0, + "content": "[4] X. Amatriain, A. Jaimes, N. Oliver, and J. M. Pujol, \"Data mining methods for recommender systems,\" in Recommender systems handbook. Springer, 2010, pp. 39-71. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.817, + 0.493, + 0.862 + ], + "angle": 0, + "content": "[5] R. Tibshirani, T. Hastie, B. Narasimhan, and G. Chu, \"Diagnosis of multiple cancer types by shrunken centroids of gene expression,\" Proceedings of the National Academy of Sciences, vol. 99, no. 10, pp. 
6567-6572, 2002. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.862, + 0.493, + 0.895 + ], + "angle": 0, + "content": "[6] O. Ivanciuc et al., \"Applications of support vector machines in chemistry,\" Reviews in computational chemistry, vol. 23, p. 291, 2007. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.895, + 0.493, + 0.94 + ], + "angle": 0, + "content": "[7] N. K. Ahmed, A. F. Atiya, N. E. Gayar, and H. El-Shishiny, \"An empirical comparison of machine learning models for time series forecasting,\" Econometric reviews, vol. 29, no. 5-6, pp. 594-621, 2010." + }, + { + "type": "list", + "bbox": [ + 0.075, + 0.667, + 0.493, + 0.94 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.055, + 0.925, + 0.089 + ], + "angle": 0, + "content": "[8] M. R. Allen and D. A. Stainforth, \"Towards objective probabilistic climate forecasting,\" Nature, vol. 419, no. 6903, pp. 228-228, 2002. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.091, + 0.925, + 0.136 + ], + "angle": 0, + "content": "[9] V. Borisov, T. Leemann, K. Seßler, J. Haug, M. Pawelczyk, and G. Kasneci, \"Deep neural networks and tabular data: A survey,\" IEEE Transactions Neural Networks and Learning Systems, vol. 35, no. 6, pp. 7499-7519, 2024. 1, 4, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.137, + 0.908, + 0.149 + ], + "angle": 0, + "content": "[10] C. C. Aggarwal, Data Mining - The Textbook. Springer, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.149, + 0.923, + 0.171 + ], + "angle": 0, + "content": "[11] Z. Ji, Z. C. Lipton, and C. Elkan, \"Differential privacy and machine learning: a survey and review,\" CoRR, vol. abs/1412.7584, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.171, + 0.923, + 0.217 + ], + "angle": 0, + "content": "[12] M. F. Delgado, E. Cernadas, S. Barro, and D. G. 
Amorim, \"Do we need hundreds of classifiers to solve real world classification problems?\" Journal of Machine Learning Research, vol. 15, no. 1, pp. 3133-3181, 2014. 1, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.217, + 0.925, + 0.239 + ], + "angle": 0, + "content": "[13] C. Bishop, Pattern recognition and machine learning. Springer, 2006. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.24, + 0.925, + 0.274 + ], + "angle": 0, + "content": "[14] T. Hastie, R. Tibshirani, and J. H. Friedman, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, 2nd Edition. Springer, 2009. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.274, + 0.925, + 0.298 + ], + "angle": 0, + "content": "[15] M. Mohri, A. Rostamizadeh, and A. Talwalkar, Foundations of Machine Learning. MIT Press, 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.298, + 0.925, + 0.331 + ], + "angle": 0, + "content": "[16] K. P. Murphy, Probabilistic Machine Learning: An introduction, ser. Adaptive computation and machine learning series. MIT Press, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.332, + 0.925, + 0.367 + ], + "angle": 0, + "content": "[17] A. Voulodimos, N. Doulamis, A. Doulamis, E. Protopapadakis et al., \"Deep learning for computer vision: A brief review,\" Computational intelligence and neuroscience, vol. 2018, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.367, + 0.923, + 0.413 + ], + "angle": 0, + "content": "[18] D. W. Otter, J. R. Medina, and J. K. Kalita, \"A survey of the usages of deep learning for natural language processing,\" IEEE transactions on neural networks and learning systems, vol. 32, no. 2, pp. 604-624, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.413, + 0.923, + 0.457 + ], + "angle": 0, + "content": "[19] Y. Bengio, A. Courville, and P. 
Vincent, \"Representation learning: A review and new perspectives,\" IEEE transactions on pattern analysis and machine intelligence, vol. 35, no. 8, pp. 1798-1828, 2013. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.459, + 0.923, + 0.481 + ], + "angle": 0, + "content": "[20] Y. LeCun, Y. Bengio, and G. Hinton, \"Deep learning,\" nature, vol. 521, no. 7553, pp. 436-444, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.482, + 0.923, + 0.504 + ], + "angle": 0, + "content": "[21] I. Goodfellow, Y. Bengio, and A. Courville, Deep learning. MIT press, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.505, + 0.923, + 0.539 + ], + "angle": 0, + "content": "[22] J. Donahue, Y. Jia, O. Vinyals, J. Hoffman, N. Zhang, E. Tzeng, and T. Darrell, \"Decaf: A deep convolutional activation feature for generic visual recognition,\" in ICML, 2014, pp. 647-655. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.539, + 0.925, + 0.573 + ], + "angle": 0, + "content": "[23] G. E. Hinton and R. R. Salakhutdinov, \"Reducing the dimensionality of data with neural networks,\" science, vol. 313, no. 5786, pp. 504-507, 2006. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.574, + 0.923, + 0.597 + ], + "angle": 0, + "content": "[24] J. Weston, F. Ratle, and R. Collobert, \"Deep learning via semi-supervised embedding,\" in ICML, 2008, pp. 1168-1175. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.597, + 0.923, + 0.63 + ], + "angle": 0, + "content": "[25] L. Van Der Maaten, \"Learning a parametric embedding by preserving local structure,\" in AISTATS, 2009, pp. 384-391. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.631, + 0.923, + 0.665 + ], + "angle": 0, + "content": "[26] M. R. Min, L. Maaten, Z. Yuan, A. J. Bonner, and Z. Zhang, \"Deep supervised t-distributed embedding,\" in ICML, 2010, pp. 791-798. 
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.665, + 0.923, + 0.7 + ], + "angle": 0, + "content": "[27] W. Zhang, T. Du, and J. Wang, \"Deep learning over multi-field categorical data -- A case study on user response prediction,\" in ECIR, 2016, pp. 45-57. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.7, + 0.923, + 0.747 + ], + "angle": 0, + "content": "[28] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah, \"Wide & deep learning for recommender systems,\" in DLRS, 2016, pp. 7-10. 2, 4, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.747, + 0.923, + 0.77 + ], + "angle": 0, + "content": "[29] K. G. Mehrotra, C. K. Mohan, H. Huang, K. G. Mehrotra, C. K. Mohan, and H. Huang, Anomaly detection. Springer, 2017. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.77, + 0.923, + 0.803 + ], + "angle": 0, + "content": "[30] F. O. Isinkaye, Y. O. Folajimi, and B. A. Ojokoh, \"Recommendation systems: Principles, methods and evaluation,\" Egyptian informatics journal, vol. 16, no. 3, pp. 261-273, 2015. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.804, + 0.923, + 0.839 + ], + "angle": 0, + "content": "[31] S. S. Rangapuram, M. W. Seeger, J. Gasthaus, L. Stella, Y. Wang, and T. Januschowski, \"Deep state space models for time series forecasting,\" in NeurIPS, 2018, pp. 7796-7805. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.839, + 0.923, + 0.872 + ], + "angle": 0, + "content": "[32] B. Lim and S. Zohren, \"Time-series forecasting with deep learning: a survey,\" Philosophical Transactions of the Royal Society A, vol. 379, no. 2194, p. 20200209, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.872, + 0.923, + 0.907 + ], + "angle": 0, + "content": "[33] Y. Gorishniy, I. Rubachev, V. Khrulkov, and A. 
Babenko, \"Revisiting deep learning models for tabular data,\" in NeurIPS, 2021, pp. 18932-18943. 2, 3, 4, 6, 7, 8, 9, 11, 12, 19, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.907, + 0.923, + 0.942 + ], + "angle": 0, + "content": "[34] D. Holzmüller, L. Grinsztajn, and I. Steinwart, \"Better by default: Strong pre-tuned mlp's and boosted trees on tabular data,\" in NeurIPS, 2024, pp. 26577-26658. 2, 4, 5, 7, 9, 12" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.055, + 0.925, + 0.942 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "23" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.09 + ], + "angle": 0, + "content": "[35] H.-J. Ye, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Revisiting nearest neighbor for tabular data: A deep tabular baseline two decades later,\" in ICLR, 2025. 2, 3, 4, 9, 10, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.492, + 0.126 + ], + "angle": 0, + "content": "[36] L. Grinsztajn, E. Oyallon, and G. Varoquaux, \"Why do tree-based models still outperform deep learning on typical tabular data?\" in NeurIPS, 2022, pp. 507-520. 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.149 + ], + "angle": 0, + "content": "[37] R. Shwartz-Ziv and A. Armon, \"Tabular data: Deep learning is not all you need,\" Information Fusion, vol. 81, pp. 84-90, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.149, + 0.492, + 0.183 + ], + "angle": 0, + "content": "[38] E. Beyazit, J. Kozaczuk, B. Li, V. Wallace, and B. Fadlallah, \"An inductive bias for tabular deep learning,\" in NeurIPS, 2023, pp. 43108-43135. 
2, 7, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.184, + 0.492, + 0.23 + ], + "angle": 0, + "content": "[39] D. C. McElfresh, S. Khandagale, J. Valverde, V. P. C., G. Ramakrishnan, M. Goldblum, and C. White, \"When do neural nets outperform boosted trees on tabular data?\" in NeurIPS, 2023, pp. 76336-76369. 2, 5, 6, 7, 8, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.231, + 0.492, + 0.276 + ], + "angle": 0, + "content": "[40] H.-J. Ye, D.-C. Zhan, N. Li, and Y. Jiang, \"Learning multiple local metrics: Global consideration helps,\" IEEE transactions on pattern analysis and machine intelligence, vol. 42, no. 7, pp. 1698-1712, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.277, + 0.492, + 0.323 + ], + "angle": 0, + "content": "[41] S. M. Jesus, J. Pombal, D. Alves, A. F. Cruz, P. Saleiro, R. P. Ribeiro, J. Gama, and P. Bizarro, \"Turning the tables: Biased, imbalanced, dynamic tabular datasets for ML evaluation,\" in NeurIPS, 2022, pp. 33563-33575. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.324, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[42] R. Kohli, M. Feurer, K. Eggensperger, B. Bischl, and F. Hutter, \"Towards quantifying the effect of datasets for benchmarking: A look at tabular machine learning,\" in ICLR Workshop, 2024. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.359, + 0.492, + 0.404 + ], + "angle": 0, + "content": "[43] A. Tschalzev, S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"A data-centric perspective on evaluating machine learning models for tabular data,\" in NeurIPS Datasets and Benchmarks Track, 2024. 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.405, + 0.492, + 0.44 + ], + "angle": 0, + "content": "[44] H.-J. Ye, S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and D.-C. Zhan, \"A closer look at deep learning on tabular data,\" CoRR, vol. abs/2407.00956, 2024. 
2, 6, 7, 8, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.44, + 0.492, + 0.474 + ], + "angle": 0, + "content": "[45] Y. Gorishniy, I. Rubachev, and A. Babenko, \"On embeddings for numerical features in tabular deep learning,\" in NeurIPS, 2022, pp. 24991-25004. 2, 4, 8, 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.475, + 0.492, + 0.51 + ], + "angle": 0, + "content": "[46] T. Ucar, E. Hajiramezanali, and L. Edwards, \"Subtab: Subsetting features of tabular data for self-supervised representation learning,\" in NeurIPS, 2021, pp. 18853-18865. 2, 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.511, + 0.492, + 0.544 + ], + "angle": 0, + "content": "[47] D. Bahri, H. Jiang, Y. Tay, and D. Metzler, \"Scarf: Self-supervised contrastive learning using random feature corruption,\" in ICLR, 2022. 2, 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.545, + 0.492, + 0.58 + ], + "angle": 0, + "content": "[48] J. Yoon, Y. Zhang, J. Jordon, and M. van der Schaar, \"VIME: extending the success of self- and semi-supervised learning to tabular domain,\" in NeurIPS, 2020, pp. 11.033-11.043. 2, 9, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.58, + 0.492, + 0.626 + ], + "angle": 0, + "content": "[49] J. Wu, S. Chen, Q. Zhao, R. Sergazinov, C. Li, S. Liu, C. Zhao, T. Xie, H. Guo, C. Ji, D. Cociorva, and H. Brunzell, \"Switchtab: Switched autoencoders are effective tabular learners,\" in AAAI, 2024, pp. 15924-15933. 2, 7, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.627, + 0.492, + 0.661 + ], + "angle": 0, + "content": "[50] A. Kadra, M. Lindauer, F. Hutter, and J. Grabocka, \"Well-tuned simple nets excel on tabular datasets,\" in NeurIPS, 2021, pp. 23928-23941. 2, 4, 6, 9, 10, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.662, + 0.492, + 0.685 + ], + "angle": 0, + "content": "[51] R. Wang, B. Fu, G. Fu, and M. 
Wang, \"Deep & cross network for ad click predictions,\" in ADKDD, 2017, pp. 1-7. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.686, + 0.492, + 0.719 + ], + "angle": 0, + "content": "[52] G. Klambauer, T. Unterthiner, A. Mayr, and S. Hochreiter, \"Self-normalizing neural networks,\" in NIPS, 2017, pp. 971-980. 2, 9, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.72, + 0.492, + 0.744 + ], + "angle": 0, + "content": "[53] G. Ke, J. Zhang, Z. Xu, J. Bian, and T.-Y. Liu, \"Tabnn: A universal neural network solution for tabular data,\" 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.744, + 0.492, + 0.79 + ], + "angle": 0, + "content": "[54] R. Wang, R. Shivanna, D. Z. Cheng, S. Jain, D. Lin, L. Hong, and E. H. Chi, \"DCN V2: improved deep & cross network and practical lessons for web-scale learning to rank systems,\" in WWW, 2021, pp. 1785-1797. 2, 7, 9, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.79, + 0.492, + 0.826 + ], + "angle": 0, + "content": "[55] J. Chen, K. Liao, Y. Wan, D. Z. Chen, and J. Wu, \"Danets: Deep abstract networks for tabular data classification and regression,\" in AAAI, 2022, pp. 3930-3938. 2, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.826, + 0.492, + 0.86 + ], + "angle": 0, + "content": "[56] J. Chen, K. Liao, Y. Fang, D. Chen, and J. Wu, \"Tabcaps: A capsule neural network for tabular data classification with bow routing,\" in ICLR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.861, + 0.492, + 0.896 + ], + "angle": 0, + "content": "[57] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in KDD, 2024, pp. 3679-3689. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.896, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[58] C. Xu, Y.-C. Huang, J. Y.-C. Hu, W. Li, A. Gilani, H.-S. Goan, and H. 
Liu, \"Bishop: Bi-directional cellular learning for tabular data with generalized sparse modern hopfield model,\" in ICML, 2024, pp. 55048-55075. 2, 7, 9, 12" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.09 + ], + "angle": 0, + "content": "[59] S. Badirli, X. Liu, Z. Xing, A. Bhowmik, and S. S. Keerthi, \"Gradient boosting neural networks: Grownet,\" CoRR, vol. abs/2002.07971, 2020. 2, 7, 8, 9, 12, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.924, + 0.125 + ], + "angle": 0, + "content": "[60] S. Popov, S. Morozov, and A. Babenko, “Neural oblivious decision ensembles for deep learning on tabular data,” in ICLR, 2020. 2, 8, 9, 12, 19, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.126, + 0.924, + 0.161 + ], + "angle": 0, + "content": "[61] C.-H. Chang, R. Caruana, and A. Goldenberg, \"NODE-GAM: neural generalized additive model for interpretable deep learning,\" in ICLR, 2022. 2, 3, 8, 9, 12, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.162, + 0.924, + 0.197 + ], + "angle": 0, + "content": "[62] W. Song, C. Shi, Z. Xiao, Z. Duan, Y. Xu, M. Zhang, and J. Tang, \"Autoint: Automatic feature interaction learning via self-attentive neural networks,\" in CIKM, 2019, pp. 1161-1170. 3, 7, 9, 11, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.198, + 0.924, + 0.233 + ], + "angle": 0, + "content": "[63] X. Huang, A. Khetan, M. Cvitkovic, and Z. S. Karnin, \"Tabransformer: Tabular data modeling using contextual embeddings,\" CoRR, vol. abs/2012.06678, 2020. 3, 7, 8, 9, 11, 13, 14, 19, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.234, + 0.924, + 0.268 + ], + "angle": 0, + "content": "[64] Q.-L. Zhou, H.-J. Ye, L. Wang, and D.-C. Zhan, \"Unlocking the transferability of tokens in deep models for tabular data,\" CoRR, vol. abs/2310.15149, 2023. 
3, 9, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.269, + 0.924, + 0.304 + ], + "angle": 0, + "content": "[65] J. Chen, J. Yan, Q. Chen, D. Z. Chen, J. Wu, and J. Sun, \"Can a deep learning model be a sure bet for tabular prediction?\" in KDD, 2024, pp. 288-296. 3, 7, 8, 9, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.305, + 0.924, + 0.34 + ], + "angle": 0, + "content": "[66] A. Jeffares, T. Liu, J. Crabbé, F. Imrie, and M. van der Schaar, \"Tangos: Regularizing tabular neural networks through gradient orthogonalization and specialization,\" in ICLR, 2023. 3, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.341, + 0.924, + 0.375 + ], + "angle": 0, + "content": "[67] H. Ye, W. Fan, X. Song, S. Zheng, H. Zhao, D. dan Guo, and Y. Chang, \"Ptarl: Prototype-based tabular representation learning via space calibration,\" in ICLR, 2024. 3, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.376, + 0.924, + 0.399 + ], + "angle": 0, + "content": "[68] Y. Nader, L. Sixt, and T. Landgraf, \"DNNR: differential nearest neighbors regression,\" in ICML, 2022, pp. 16296-16317. 3, 7, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.4, + 0.924, + 0.435 + ], + "angle": 0, + "content": "[69] Y. Gorishniy, I. Rubachev, N. Kartashev, D. Shlenskii, A. Kotelnikov, and A. Babenko, \"Tabr: Tabular deep learning meets nearest neighbors in 2023,\" in ICLR, 2024. 3, 6, 7, 9, 10, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.436, + 0.924, + 0.482 + ], + "angle": 0, + "content": "[70] G. Somepalli, A. Schwarzschild, M. Goldblum, C. B. Bruss, and T. Goldstein, \"SAINT: Improved neural networks for tabular data via row attention and contrastive pre-training,\" in NeurIPS Workshop, 2022. 3, 7, 9, 10, 11, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.483, + 0.924, + 0.517 + ], + "angle": 0, + "content": "[71] I. Rubachev, A. Alekberov, Y. Gorishniy, and A. 
Babenko, \"Revisiting pretraining objectives for tabular deep learning,\" CoRR, vol. abs/2207.03208, 2022. 3, 7, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.518, + 0.924, + 0.552 + ], + "angle": 0, + "content": "[72] S. Onishi, K. Oono, and K. Hayashi, \"Tabret: Pre-training transformer-based tabular models for unseen columns,\" CoRR, vol. abs/2303.15747, 2023. 3, 9, 12, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.553, + 0.924, + 0.588 + ], + "angle": 0, + "content": "[73] J. Shen, L. Li, L. M. Dery, C. Staten, M. Khodak, G. Neubig, and A. Talwalkar, \"Cross-modal fine-tuning: Align then refine,\" in ICML, 2023, pp. 31030-31056. 3, 9, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.589, + 0.924, + 0.635 + ], + "angle": 0, + "content": "[74] Y. Zhu, T. Brettin, F. Xia, A. Partin, M. Shukla, H. Yoo, Y. A. Evrard, J. H. Doroshow, and R. L. Stevens, \"Converting tabular data into images for deep learning with convolutional neural networks,\" Scientific Reports, vol. 11, no. 11325, 2021. 3, 4, 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.636, + 0.924, + 0.659 + ], + "angle": 0, + "content": "[75] S. Lee and S.-C. Lee, \"Tableye: Seeing small tables through the lens of images,\" CoRR, vol. abs/2307.02491, 2023. 3, 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.66, + 0.924, + 0.705 + ], + "angle": 0, + "content": "[76] A. Mamdouh, M. El-Melegy, S. Ali, and R. Kikinis, \"Tab2visual: Overcoming limited data in tabular data classification using deep learning with visual representations,\" CoRR, vol. abs/2502.07181, 2025.3,9,17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.706, + 0.924, + 0.741 + ], + "angle": 0, + "content": "[77] Z. Wang and J. Sun, \"Transtab: Learning transferable tabular transformers across tables,\" in NeurIPS, 2022, pp. 2902-2915. 
3, 9, 13, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.742, + 0.924, + 0.777 + ], + "angle": 0, + "content": "[78] J. Yan, B. Zheng, H. Xu, Y. Zhu, D. Z. Chen, J. Sun, J. Wu, and J. Chen, \"Making pre-trained language models great on tabular prediction,\" in ICLR, 2024. 3, 6, 9, 16, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.778, + 0.924, + 0.813 + ], + "angle": 0, + "content": "[79] C. Ye, G. Lu, H. Wang, L. Li, S. Wu, G. Chen, and J. Zhao, \"Towards cross-table masked pretraining for web data mining,\" in WWW, 2024, pp. 4449-4459. 3, 6, 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.814, + 0.924, + 0.859 + ], + "angle": 0, + "content": "[80] S. Hegselmann, A. Buendia, H. Lang, M. Agrawal, X. Jiang, and D. Sontag, \"Tabllm: few-shot classification of tabular data with large language models,\" in AISTATS, 2023, pp. 5549-5581. 3, 9, 13, 16, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.86, + 0.924, + 0.895 + ], + "angle": 0, + "content": "[81] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.896, + 0.924, + 0.941 + ], + "angle": 0, + "content": "[82] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing CAAFE for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 3, 9" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.941 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "24" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.055, + 0.494, + 0.09 + ], + "angle": 0, + "content": "[83] S. Han, J. Yoon, S. Ö. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 3, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.092, + 0.493, + 0.138 + ], + "angle": 0, + "content": "[84] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024. 3, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.139, + 0.492, + 0.173 + ], + "angle": 0, + "content": "[85] Y. Liang, H. Wen, Y. Nie, Y. Jiang, M. Jin, D. Song, S. Pan, and Q. Wen, \"Foundation models for time series analysis: A tutorial and survey,\" in SIGKDD, 2024, pp. 6555-6565. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.174, + 0.493, + 0.208 + ], + "angle": 0, + "content": "[86] H.-J. Ye, Q.-L. Zhou, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Rethinking pre-training in tabular data: A neighborhood embedding perspective,\" CoRR, vol. abs/2311.00055, 2025. 3, 9, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.209, + 0.493, + 0.244 + ], + "angle": 0, + "content": "[87] D. Bonet, D. M. Montserrat, X. G. i Nieto, and A. G. Ioannidis, \"Hyperfast: Instant classification for tabular data,\" in AAAI, 2024, pp. 11 114-11 123. 3, 7, 9, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.245, + 0.493, + 0.278 + ], + "angle": 0, + "content": "[88] A. Müller, C. Curino, and R. Ramakrishnan, \"Mothernet: Fast training and inference via hyper-network transformers,\" in ICLR, 2025. 
3, 8, 9, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.28, + 0.493, + 0.314 + ], + "angle": 0, + "content": "[89] N. Hollmann, S. Müller, K. Eggensperger, and F. Hutter, \"Tabpfn: A transformer that solves small tabular classification problems in a second,\" in ICLR, 2023. 3, 6, 7, 8, 9, 10, 16, 18, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.315, + 0.493, + 0.35 + ], + "angle": 0, + "content": "[90] V. Thomas, J. Ma, R. Hosseinzadeh, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Retrieval & fine-tuning for in-context tabular models,\" in NeurIPS, 2024, pp. 108439-108467. 3, 10, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.351, + 0.493, + 0.397 + ], + "angle": 0, + "content": "[91] N. Hollmann, S. Müller, L. Purucker, A. Krishnakumar, M. Körfer, S. B. Hoo, R. T. Schirrmeister, and F. Hutter, \"Accurate predictions on small data with a tabular foundation model,\" Nature, vol. 637, no. 8045, pp. 319-326, 2025. 3, 9, 10, 18, 19, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.398, + 0.493, + 0.432 + ], + "angle": 0, + "content": "[92] J. Gardner, J. C. Perdomo, and L. Schmidt, \"Large scale transfer learning for tabular data via language modeling,\" in NeurIPS, 2024, pp. 45155-45205. 3, 6, 9, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.433, + 0.493, + 0.478 + ], + "angle": 0, + "content": "[93] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 3, 9, 17, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.48, + 0.493, + 0.514 + ], + "angle": 0, + "content": "[94] X. Wen, S. Zheng, Z. Xu, Y. Sun, and J. Bian, \"Scalable in-context learning on tabular data via retrieval-augmented large language models,\" CoRR, vol. abs/2502.03147, 2025. 
3, 9, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.515, + 0.493, + 0.55 + ], + "angle": 0, + "content": "[95] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" CoRR, vol. abs/2410.24210, 2024. 3, 20, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.551, + 0.493, + 0.586 + ], + "angle": 0, + "content": "[96] S.-Y. Liu and H.-J. Ye, \"Tabpfn unleashed: A scalable and effective solution to tabular classification problems,\" CoRR, vol. abs/2502.02527, 2025. 3, 17, 19, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.587, + 0.493, + 0.611 + ], + "angle": 0, + "content": "[97] J. Svirsky and O. Lindenbaum, \"Interpretable deep clustering for tabular data,\" in ICML, 2024, pp. 47314-47330. 3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.611, + 0.493, + 0.634 + ], + "angle": 0, + "content": "[98] H. T. Rauf, A. Freitas, and N. W. Paton, \"Tabledc: Deep clustering for tabular data,\" CoRR, vol. abs/2405.17723, 2024. 3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.635, + 0.493, + 0.669 + ], + "angle": 0, + "content": "[99] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" in NeurIPS, 2022, pp. 32142-32159. 3, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.67, + 0.493, + 0.694 + ], + "angle": 0, + "content": "[100] T. Shenkar and L. Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.493, + 0.718 + ], + "angle": 0, + "content": "[101] J. Yin, Y. Qiao, Z. Zhou, X. Wang, and J. Yang, \"MCM: masked cell modeling for anomaly detection in tabular data,\" in ICLR, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.719, + 0.493, + 0.764 + ], + "angle": 0, + "content": "[102] L. Hansen, N. Seedat, M. van der Schaar, and A. 
Petrovic, \"Reimagining synthetic tabular data generation through data-centric AI: A comprehensive benchmark,\" in NeurIPS, 2023, pp. 33781-33823. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.765, + 0.493, + 0.811 + ], + "angle": 0, + "content": "[103] C. Hou, S. Gu, C. Xu, and Y. Qian, \"Incremental learning for simultaneous augmentation of feature and class,\" IEEE Transactions on pattern analysis and machine intelligence, vol. 45, no. 12, pp. 14789-14806, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.493, + 0.846 + ], + "angle": 0, + "content": "[104] M. Vero, M. Balunovic, and M. T. Vechev, \"Cuts: Customizable tabular synthetic data generation,\" in ICML, 2024, pp. 49408-49433. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.847, + 0.493, + 0.871 + ], + "angle": 0, + "content": "[105] S. Ö. Arik and T. Pfister, \"Tabnet: Attentive interpretable tabular learning,\" in AAAI, 2021, pp. 6679-6687. 3, 7, 8, 9, 12, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.871, + 0.493, + 0.907 + ], + "angle": 0, + "content": "[106] P. Hager, M. J. Menten, and D. Rueckert, \"Best of both worlds: Multimodal contrastive learning with tabular and imaging data,\" in CVPR, 2023, pp. 23924-23935. 3, 7, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.908, + 0.493, + 0.943 + ], + "angle": 0, + "content": "[107] J.-P. Jiang, H.-J. Ye, L. Wang, Y. Yang, Y. Jiang, and D.-C. Zhan, \"Tabular insights, visual impacts: Transferring expertise from tables to images,\" in ICML, 2024, pp. 21988-22009. 3, 7, 21" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.494, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.09 + ], + "angle": 0, + "content": "[108] Y. Diao, Y. Yang, Q. Li, B. He, and M. Lu, \"Oebench: Investigating open environment challenges in real-world relational data streams,\" VLDB, vol. 17, no. 6, pp. 1283-1296, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.924, + 0.125 + ], + "angle": 0, + "content": "[109] I. Rubachev, N. Kartashev, Y. Gorishniy, and A. Babenko, \"Tabred: A benchmark of tabular machine learning in-the-wild,\" CoRR, vol. abs/2406.19380, 2024. 3, 6, 8, 20, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.126, + 0.924, + 0.159 + ], + "angle": 0, + "content": "[110] J. Gardner, Z. Popovic, and L. Schmidt, \"Benchmarking distribution shift in tabular data with tableshift,\" in NeurIPS, 2024, pp. 53385-53432. 3, 20, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.16, + 0.924, + 0.194 + ], + "angle": 0, + "content": "[111] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.195, + 0.924, + 0.219 + ], + "angle": 0, + "content": "[112] N. Jin, J. Siebert, D. Li, and Q. Chen, \"A survey on table question answering: recent advances,\" in CCKS, 2022, pp. 174-186. 3, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.219, + 0.924, + 0.265 + ], + "angle": 0, + "content": "[113] X. Fang, W. Xu, F. A. Tan, J. Zhang, Z. Hu, Y. Qi, S. Nickleach, D. Socolinsky, S. Sengamedu, and C. Faloutsos, \"Large language models (llms) on tabular data: Prediction, generation, and understanding-a survey,\" CoRR, vol. abs/2402.17944, 2024. 3, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.266, + 0.924, + 0.299 + ], + "angle": 0, + "content": "[114] C. Winship and R. D. Mare, \"Regression models with ordinal variables,\" American sociological review, vol. 49, no. 4, pp. 512-525, 1984. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.3, + 0.924, + 0.346 + ], + "angle": 0, + "content": "[115] P. A. Gutierrez, M. Perez-Ortiz, J. Sánchez-Monedero, F. Fernández-Navarro, and C. Hervás-Martínez, \"Ordinal regression methods: Survey and experimental study,\" IEEE Trans. Knowl. 
Data Eng., vol. 28, no. 1, pp. 127-146, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.924, + 0.392 + ], + "angle": 0, + "content": "[116] A. Jeffares, A. Curth, and M. van der Schaar, \"Deep learning through A telescoping lens: A simple model provides empirical insights on grokking, gradient boosting & beyond,\" in NeurIPS, 2024, pp. 123-498-123-533. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.393, + 0.924, + 0.428 + ], + "angle": 0, + "content": "[117] G. Cormode, P. Indyk, N. Koudas, and S. Muthukrishnan, \"Fast mining of massive tabular data via approximate distance computations,\" in ICDE, 2002, pp. 605-614. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.429, + 0.924, + 0.451 + ], + "angle": 0, + "content": "[118] M. D. Adelfio and H. Samet, \"Schema extraction for tabular data on the web,\" VLDB, vol. 6, no. 6, pp. 421-432, 2013. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.452, + 0.924, + 0.474 + ], + "angle": 0, + "content": "[119] J. F. Arias, A. K. Chhabra, and V. Misra, \"Efficient interpretation of tabular documents,\" in ICPR, 1996, pp. 681-685. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.475, + 0.924, + 0.509 + ], + "angle": 0, + "content": "[120] H.-L. Wang, S.-H. Wu, K. K. Wang, C.-L. Sung, W.-L. Hsu, and W.-K. Shih, \"Semantic search on internet tabular information extraction for answering queries,\" in CIKM, 2000, pp. 243-249. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.51, + 0.924, + 0.533 + ], + "angle": 0, + "content": "[121] M.-J. Nederhof, \"An optimal tabular parsing algorithm,\" in ACL, 1994, pp. 117-124. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.534, + 0.924, + 0.567 + ], + "angle": 0, + "content": "[122] J. F. Arias, A. K. Chhabra, and V. Misra, \"Interpreting and representing tabular documents,\" in CVPR, 1996, pp. 600-605. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.568, + 0.924, + 0.592 + ], + "angle": 0, + "content": "[123] G. Richards and V. J. Rayward-Smith, \"Discovery of association rules in tabular data,\" in ICDM, 2001, pp. 465-472. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.592, + 0.924, + 0.614 + ], + "angle": 0, + "content": "[124] J. R. Quinlan, \"Induction of decision trees,\" Machine learning, vol. 1, pp. 81-106, 1986. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.615, + 0.924, + 0.639 + ], + "angle": 0, + "content": "[125] L. Breiman, J. Friedman, R. Olshen, and C. J. Stone, Classification and Regression Trees. Chapman and Hall/CRC, 1984. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.639, + 0.924, + 0.673 + ], + "angle": 0, + "content": "[126] Y. Freund and R. E. Schapire, “A desicion-theoretic generalization of on-line learning and an application to boosting,” in EuroCOLT, 1995, pp. 23-37. 4, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.674, + 0.924, + 0.696 + ], + "angle": 0, + "content": "[127] L. Breiman, \"Random forests,\" Machine Learning, vol. 45, no. 1, pp. 5-32, 2001. 4, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.697, + 0.924, + 0.72 + ], + "angle": 0, + "content": "[128] J. H. Friedman, \"Greedy function approximation: a gradient boosting machine,\" Annals of statistics, pp. 1189-1232, 2001. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.721, + 0.924, + 0.744 + ], + "angle": 0, + "content": "[129] ——, \"Stochastic gradient boosting,\" Computational statistics & data analysis, vol. 38, no. 4, pp. 367-378, 2002. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.744, + 0.924, + 0.767 + ], + "angle": 0, + "content": "[130] T. Chen and C. Guestrin, \"Xgboost: A scalable tree boosting system,\" in KDD, 2016, pp. 785-794. 4, 8, 18, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.767, + 0.924, + 0.802 + ], + "angle": 0, + "content": "[131] G. 
Ke, Q. Meng, T. Finley, T. Wang, W. Chen, W. Ma, Q. Ye, and T.-Y. Liu, \"Lightgbm: A highly efficient gradient boosting decision tree,\" in NIPS, 2017, pp. 3146-3154. 4, 8, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.803, + 0.924, + 0.837 + ], + "angle": 0, + "content": "[132] L. O. Prokhorenkova, G. Gusev, A. Vorobev, A. V. Dorogush, and A. Gulin, \"Catboost: unbiased boosting with categorical features,\" in NeurIPS, 2018, pp. 6639-6649. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.838, + 0.924, + 0.871 + ], + "angle": 0, + "content": "[133] D. Nielsen, \"Tree boosting with xgboost-why does xgboost win \"every\" machine learning competition?\" Master's thesis, NTNU, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.871, + 0.924, + 0.907 + ], + "angle": 0, + "content": "[134] S. Makridakis, E. Spiliotis, and V. Assimakopoulos, \"M5 accuracy competition: Results, findings, and conclusions,\" International Journal of Forecasting, vol. 38, no. 4, pp. 1346-1364, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.907, + 0.924, + 0.943 + ], + "angle": 0, + "content": "[135] H. Larochelle, D. Erhan, A. Courville, J. Bergstra, and Y. Bengio, \"An empirical evaluation of deep architectures on problems with many factors of variation,\" in ICML, 2007, pp. 473-480. 4" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "25" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.056, + 0.494, + 0.09 + ], + "angle": 0, + "content": "[136] R. Salakhutdinov and G. Hinton, \"Learning a nonlinear embedding by preserving class neighbourhood structure,\" in AISTATS, 2007, pp. 
412-419. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.493, + 0.125 + ], + "angle": 0, + "content": "[137] R. Min, D. A. Stanley, Z. Yuan, A. Bonner, and Z. Zhang, “A deep non-linear feature mapping for large-margin knn classification,” in ICDM, 2009, pp. 357-366. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.126, + 0.493, + 0.159 + ], + "angle": 0, + "content": "[138] M. Ahmed, A. N. Mahmood, and J. Hu, \"A survey of network anomaly detection techniques,\" Journal of Network and Computer Applications, vol. 60, pp. 19-31, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.16, + 0.493, + 0.194 + ], + "angle": 0, + "content": "[139] L. Lu, M. Medo, C. H. Yeung, Y.-C. Zhang, Z.-K. Zhang, and T. Zhou, \"Recommender systems,\" Physics reports, vol. 519, no. 1, pp. 1-49, 2012. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.195, + 0.493, + 0.239 + ], + "angle": 0, + "content": "[140] D. Salinas, V. Flunkert, J. Gasthaus, and T. Januschowski, \"Deepar: Probabilistic forecasting with autoregressive recurrent networks,\" International journal of forecasting, vol. 36, no. 3, pp. 1181-1191, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.24, + 0.493, + 0.274 + ], + "angle": 0, + "content": "[141] T.-J. Huang, X.-Y. Chen, and H.-J. Ye, \"Seqfusion: Sequential fusion of pre-trained models for zero-shot time-series forecasting,\" CoRR, vol. abs/2503.02836, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.275, + 0.493, + 0.298 + ], + "angle": 0, + "content": "[142] Q. Liu, F. Yu, S. Wu, and L. Wang, \"A convolutional click prediction model,\" in CIKM, 2015, pp. 1743-1746. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.299, + 0.493, + 0.332 + ], + "angle": 0, + "content": "[143] H. Guo, R. Tang, Y. Ye, Z. Li, and X. He, \"Deepfm: A factorization-machine based neural network for CTR prediction,\" in IJCAI, 2017, pp. 1725-1731. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.333, + 0.493, + 0.366 + ], + "angle": 0, + "content": "[144] S. Somvanshi, S. Das, S. A. Javed, G. Antariksa, and A. Hossain, \"A survey on deep tabular learning,\" CoRR, vol. abs/2410.12034, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.367, + 0.493, + 0.39 + ], + "angle": 0, + "content": "[145] D. Lane, D. Scott, M. Hebl, R. Guerra, D. Osherson, and H. Zimmer, Introduction to statistics. CiteSeer, 2003. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.391, + 0.493, + 0.423 + ], + "angle": 0, + "content": "[146] A. F. Karr, A. P. Sanil, and D. L. Banks, \"Data quality: A statistical perspective,\" Statistical Methodology, vol. 3, no. 2, pp. 137-173, 2006. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.425, + 0.493, + 0.47 + ], + "angle": 0, + "content": "[147] A. Sánchez-Morales, J.-L. Sancho-Gómez, J.-A. Martínez-García, and A. R. Figueiras-Vidal, \"Improving deep learning performance with missing values via deletion and compensation,\" Neural Computing and Applications, vol. 32, pp. 13233-13244, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.471, + 0.493, + 0.504 + ], + "angle": 0, + "content": "[148] D. Chicco, L. Oneto, and E. Tavazzi, \"Eleven quick tips for data cleaning and feature engineering,\" PLOS Computational Biology, vol. 18, no. 12, p. e1010718, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.505, + 0.493, + 0.539 + ], + "angle": 0, + "content": "[149] Y. Luo, M. Wang, H. Zhou, Q. Yao, W.-W. Tu, Y. Chen, W. Dai, and Q. Yang, \"Autocross: Automatic feature crossing for tabular data in real-world applications,\" in KDD, 2019, pp. 1936-1945. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.54, + 0.493, + 0.573 + ], + "angle": 0, + "content": "[150] H. He and E. A. Garcia, \"Learning from imbalanced data,\" IEEE Transactions on knowledge and data engineering, vol. 21, no. 9, pp. 1263-1284, 2009. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.574, + 0.493, + 0.597 + ], + "angle": 0, + "content": "[151] H. He and Y. Ma, Imbalanced learning: foundations, algorithms, and applications. John Wiley & Sons, 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.598, + 0.493, + 0.62 + ], + "angle": 0, + "content": "[152] T. Lin, P. Goyal, R. B. Girshick, K. He, and P. Dollar, \"Focal loss for dense object detection,\" in ICCV, 2017, pp. 2999-3007. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.621, + 0.493, + 0.654 + ], + "angle": 0, + "content": "[153] J. M. Johnson and T. M. Khoshgoftaar, \"Survey on deep learning with class imbalance,\" Journal of big data, vol. 6, no. 1, pp. 1-54, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.655, + 0.493, + 0.689 + ], + "angle": 0, + "content": "[154] J. Engelmann and S. Lessmann, \"Conditional Wasserstein gan-based oversampling of tabular data for imbalanced learning,\" Expert Systems with Applications, vol. 174, p. 114582, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.69, + 0.493, + 0.724 + ], + "angle": 0, + "content": "[155] R. Sauber-Cole and T. M. Khoshgoftaar, \"The use of generative adversarial networks to alleviate class imbalance in tabular data: a survey,\" Journal of Big Data, vol. 9, no. 1, p. 98, 2022. 5, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.725, + 0.493, + 0.758 + ], + "angle": 0, + "content": "[156] X.-Y. Liu, J. Wu, and Z.-H. Zhou, \"Exploratory undersampling for class-imbalance learning,\" IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550, 2008. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.759, + 0.493, + 0.793 + ], + "angle": 0, + "content": "[157] N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer, \"SMOTE: synthetic minority over-sampling technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.794, + 0.493, + 0.839 + ], + "angle": 0, + "content": "[158] A. Fernández, S. García, F. Herrera, and N. V. Chawla, \"SMOTE for learning from imbalanced data: Progress and challenges, marking the 15-year anniversary,\" Journal of Artificial Intelligence Research, vol. 61, pp. 863-905, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.839, + 0.493, + 0.873 + ], + "angle": 0, + "content": "[159] K. Cao, C. Wei, A. Gaidon, N. Arechiga, and T. Ma, \"Learning imbalanced datasets with label-distribution-aware margin loss,\" in NeurIPS, 2019, pp. 1567-1578. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.874, + 0.493, + 0.907 + ], + "angle": 0, + "content": "[160] Y. Cui, M. Jia, T.-Y. Lin, Y. Song, and S. Belongie, \"Class-balanced loss based on effective number of samples,\" in CVPR, 2019, pp. 9268-9277. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.908, + 0.493, + 0.943 + ], + "angle": 0, + "content": "[161] Y. Xie, Z. Wang, Y. Li, B. Ding, N. M. Gurel, C. Zhang, M. Huang, W. Lin, and J. Zhou, \"Fives: Feature interaction via edge search for large-scale tabular data,\" in SIGKDD, 2021, pp. 3795-3805. 5" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.056, + 0.494, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.09 + ], + "angle": 0, + "content": "[162] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.924, + 0.125 + ], + "angle": 0, + "content": "[163] A. Klein and F. Hutter, \"Tabular benchmarks for joint architecture and hyperparameter optimization,\" CoRR, vol. abs/1905.04970, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.924, + 0.162 + ], + "angle": 0, + "content": "[164] P. 
Pokhrel, \"A comparison of automl hyperparameter optimization tools for tabular data,\" Ph.D. dissertation, Youngstown State University, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.163, + 0.924, + 0.186 + ], + "angle": 0, + "content": "[165] F. Hutter, L. Kotthoff, and J. Vanschoren, Automated machine learning: methods, systems, challenges. Springer Nature, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.187, + 0.924, + 0.21 + ], + "angle": 0, + "content": "[166] X. He, K. Zhao, and X. Chu, \"Automl: A survey of the state-of-the-art,\" Knowledge-based systems, vol. 212, p. 106622, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.211, + 0.924, + 0.245 + ], + "angle": 0, + "content": "[167] M. Feurer, K. Eggensperger, S. Falkner, M. Lindauer, and F. Hutter, \"Auto-sklearn 2.0: Hands-free automl via meta-learning,\" Journal of Machine Learning Research, vol. 23, no. 261, pp. 1-61, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.246, + 0.924, + 0.28 + ], + "angle": 0, + "content": "[168] C. Mennella, U. Maniscalco, G. De Pietro, and M. Esposito, \"Ethical and regulatory challenges of ai technologies in healthcare: A narrative review,\" Heliyon, vol. 10, no. 4, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.281, + 0.924, + 0.316 + ], + "angle": 0, + "content": "[169] W. Moore and S. Frye, \"Review of hipaa, part 1: history, protected health information, and privacy and security rules,\" Journal of nuclear medicine technology, vol. 47, no. 4, pp. 269-272, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.924, + 0.351 + ], + "angle": 0, + "content": "[170] D. F. Sittig and H. Singh, \"Legal, ethical, and financial dilemmas in electronic health record adoption and use,\" Pediatrics, vol. 127, no. 4, pp. e1042-e1047, 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.352, + 0.924, + 0.398 + ], + "angle": 0, + "content": "[171] J. Amann, A. 
Blasimme, E. Vayena, D. Frey, V. I. Madai, and P. Consortium, \"Explainability for artificial intelligence in healthcare: a multidisciplinary perspective,\" BMC medical informatics and decision making, vol. 20, pp. 1-9, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.399, + 0.924, + 0.434 + ], + "angle": 0, + "content": "[172] B. S. Caffo, F. A. D'Asaro, A. Garcez, and E. Raffinetti, \"Explainable artificial intelligence models and methods in finance and healthcare,\" p. 970246, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.435, + 0.924, + 0.458 + ], + "angle": 0, + "content": "[173] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger, \"On calibration of modern neural networks,\" in ICML, 2017, pp. 1321-1330. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.459, + 0.924, + 0.493 + ], + "angle": 0, + "content": "[174] K. Helli, D. Schnurr, N. Hollmann, S. Müller, and F. Hutter, \"Drift-resilient tabpfn: In-context learning temporal distribution shifts on tabular data,\" in NeurIPS, 2024, pp. 98742-98781. 5, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.494, + 0.924, + 0.528 + ], + "angle": 0, + "content": "[175] J. Demsr, \"Statistical comparisons of classifiers over multiple data sets,\" Journal of Machine Learning Research, vol. 7, pp. 1-30, 2006. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.529, + 0.924, + 0.563 + ], + "angle": 0, + "content": "[176] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" in ICLR, 2025. 5, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.924, + 0.588 + ], + "angle": 0, + "content": "[177] M. E. Glickman and A. C. Jones, \"Rating the chess rating system,\" CHANCE-BERLIN THEN NEW YORK-, vol. 12, pp. 21-28, 1999. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.589, + 0.924, + 0.624 + ], + "angle": 0, + "content": "[178] L. M. Hvattum and H. 
Arntzen, \"Using elo ratings for match result prediction in association football,\" International Journal of forecasting, vol. 26, no. 3, pp. 460-470, 2010. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.625, + 0.924, + 0.67 + ], + "angle": 0, + "content": "[179] J. Ma, V. Thomas, R. Hosseinzadeh, H. Kamkari, A. Labach, J. C. Cresswell, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Tabdpt: Scaling tabular foundation models,\" CoRR, vol. abs/2410.18164, 2024. 6, 18, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.671, + 0.924, + 0.706 + ], + "angle": 0, + "content": "[180] A. Tschalzev, L. Purucker, S. Lüdtke, F. Hutter, C. Bartelt, and H. Stuckenschmidt, \"Unreflected use of tabular data repositories can undermine research quality,\" in ICLR Workshop, 2025. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.707, + 0.924, + 0.741 + ], + "angle": 0, + "content": "[181] S. B. Rabbani, I. V. Medri, and M. D. Samad, \"Attention versus contrastive learning of tabular data - A data-centric benchmarking,\" CoRR, vol. abs/2401.04266, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.742, + 0.924, + 0.776 + ], + "angle": 0, + "content": "[182] Y. Yang, Y. Wang, G. Liu, L. Wu, and Q. Liu, \"Unitabe: A universal pretraining protocol for tabular foundation model in data science,\" in ICLR, 2024. 6, 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.777, + 0.924, + 0.8 + ], + "angle": 0, + "content": "[183] G. Eggert, K. Huo, M. Biven, and J. Waugh, \"Tablib: A dataset of 627m tables with context,\" CoRR, vol. abs/2310.07875, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.801, + 0.924, + 0.846 + ], + "angle": 0, + "content": "[184] H. W. 
Jian Yang, Xuefeng Li, \"DeepTables: A Deep Learning Python Package for Tabular Data,\" https://github.com/DataCanvasIO/DeepTables, 2022, version 0.2.x.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.848, + 0.924, + 0.882 + ], + "angle": 0, + "content": "[185] N. Erickson, J. Mueller, A. Shirkov, H. Zhang, P. Larroy, M. Li, and A. Smola, \"Autogluon-tabular: Robust and accurate automl for structured data,\" CoRR, vol. abs/2003.06505, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.884, + 0.924, + 0.907 + ], + "angle": 0, + "content": "[186] M. Joseph, \"Pytorch tabular: A framework for deep learning with tabular data,\" CoRR, vol. abs/2104.13638, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.908, + 0.924, + 0.943 + ], + "angle": 0, + "content": "[187] J. R. Zaurin and P. Mulinka, \"pytorch-widedeep: A flexible package for multimodal deep learning,\" Journal of Open Source Software, vol. 8, no. 86, p. 5027, Jun. 2023. 6" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "26" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.089 + ], + "angle": 0, + "content": "[188] S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and H.-J. Ye, \"TALENT: A tabular analytics and learning toolbox,\" CoRR, vol. abs/2407.04057, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.492, + 0.125 + ], + "angle": 0, + "content": "[189] T. Akiba, S. Sano, T. Yanase, T. Ohta, and M. Koyama, \"Optuna: A next-generation hyperparameter optimization framework,\" in KDD, 2019, pp. 2623-2631. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.126, + 0.492, + 0.159 + ], + "angle": 0, + "content": "[190] N. Morgan and H. Bourlard, \"Generalization and parameter estimation in feedforward nets: Some experiments,\" in NeuIPS, 1989, pp. 630-637. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.16, + 0.492, + 0.182 + ], + "angle": 0, + "content": "[191] S. Arlot and A. Celisse, \"A survey of cross-validation procedures for model selection,\" CoRR, vol. abs/0907.4728, 2009. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.183, + 0.492, + 0.218 + ], + "angle": 0, + "content": "[192] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, T.-W. Chen, and T.-H. Chang, \"Prompt: Towards a better deep neural network for tabular data,\" in ICML, 2023, pp. 4392-4434. 7, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.218, + 0.492, + 0.251 + ], + "angle": 0, + "content": "[193] S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"GRANDE: gradient-based decision tree ensembles for tabular data,\" in ICLR, 2024. 7, 8, 9, 12, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.252, + 0.492, + 0.296 + ], + "angle": 0, + "content": "[194] X. Jiang, A. Margeloiu, N. Simidjievski, and M. Jamnik, \"Protogate: Prototype-based neural networks with global-to-local feature selection for tabular biomedical data,\" in ICML, 2024, pp. 21844-21878. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.297, + 0.492, + 0.332 + ], + "angle": 0, + "content": "[195] G. C. Cawley and N. L. C. Talbot, \"On over-fitting in model selection and subsequent selection bias in performance evaluation,\" Journal of Machine Learning Research, vol. 11, pp. 2079-2107, 2010. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.333, + 0.492, + 0.367 + ], + "angle": 0, + "content": "[196] T. G. Dietterich, \"Approximate statistical tests for comparing supervised classification learning algorithms,\" Neural Computation, vol. 10, no. 7, pp. 1895-1923, 1998. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.367, + 0.492, + 0.39 + ], + "angle": 0, + "content": "[197] S. Raschka, \"Model evaluation, model selection, and algorithm selection in machine learning,\" CoRR, vol. abs/1811.12808, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.435 + ], + "angle": 0, + "content": "[198] H. Schulz-Kumpel, S. Fischer, T. Nagler, A. Boulesteix, B. Bischl, and R. Hornung, \"Constructing confidence intervals for 'the' generalization error - a comprehensive benchmark study,\" CoRR, vol. abs/2409.18836, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.436, + 0.492, + 0.469 + ], + "angle": 0, + "content": "[199] T. Nagler, L. Schneider, B. Bischl, and M. Feurer, \"Reshuffling resampling splits can improve generalization of hyperparameter optimization,\" in NeurIPS, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.47, + 0.492, + 0.493 + ], + "angle": 0, + "content": "[200] J. Feng, Y. Yu, and Z. Zhou, \"Multi-layered gradient boosting decision trees,\" in NeurIPS, 2018, pp. 3555-3565. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.494, + 0.492, + 0.538 + ], + "angle": 0, + "content": "[201] I. Padhi, Y. Schiff, I. Melnyk, M. Rigotti, Y. Mroueh, P. Dognin, J. Ross, R. Nair, and E. Altman, \"Tabular transformers for modeling multivariate time series,\" in ICASSP, 2021, pp. 3565-3569. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.539, + 0.492, + 0.584 + ], + "angle": 0, + "content": "[202] F. Di Martino and F. Delmastro, \"Explainable ai for clinical and remote health applications: a survey on tabular and time series data,\" Artificial Intelligence Review, vol. 56, no. 6, pp. 5261-5315, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.585, + 0.492, + 0.62 + ], + "angle": 0, + "content": "[203] G. M. Van de Ven, T. Tuytelaars, and A. S. Tolias, \"Three types of incremental learning,\" Nature Machine Intelligence, vol. 4, no. 
12, pp. 1185-1197, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.62, + 0.492, + 0.664 + ], + "angle": 0, + "content": "[204] D.-W. Zhou, Q.-W. Wang, Z.-H. Qi, H.-J. Ye, D.-C. Zhan, and Z. Liu, \"Class-incremental learning: A survey,\" IEEE transactions on pattern analysis and machine intelligence, vol. 46, no. 12, pp. 9851-9873, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.665, + 0.492, + 0.688 + ], + "angle": 0, + "content": "[205] J. Yosinski, J. Clune, Y. Bengio, and H. Lipson, \"How transferable are features in deep neural networks?\" in NIPS, vol. 27, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.689, + 0.492, + 0.734 + ], + "angle": 0, + "content": "[206] S. U. H. Dar, M. Özbey, A. B. Çatlı, and T. Çukur, \"A transfer-learning approach for accelerated mri using deep neural networks,\" Magnetic resonance in medicine, vol. 84, no. 2, pp. 663-685, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.735, + 0.492, + 0.768 + ], + "angle": 0, + "content": "[207] Y. Cao, Z. Fang, Y. Wu, D.-X. Zhou, and Q. Gu, \"Towards understanding the spectral bias of deep learning,\" CoRR, vol. abs/1912.01198, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.77, + 0.492, + 0.804 + ], + "angle": 0, + "content": "[208] R. Basri, M. Galun, A. Geifman, D. Jacobs, Y. Kasten, and S. Kritchman, \"Frequency bias in neural networks for input of non-uniform density,\" in ICML, 2020, pp. 685-694. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.805, + 0.492, + 0.839 + ], + "angle": 0, + "content": "[209] F. Matteucci, V. Arzamasov, and K. Böhm, \"A benchmark of categorical encoders for binary classification,\" in NeurIPS, 2023, pp. 54855-54875. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.839, + 0.492, + 0.873 + ], + "angle": 0, + "content": "[210] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. 
Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in SIGKDD, 2024, pp. 3679-3689. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.873, + 0.492, + 0.907 + ], + "angle": 0, + "content": "[211] M. Pang, K. M. Ting, P. Zhao, and Z. Zhou, \"Improving deep forest by screening,\" IEEE Transactions on Knowledge and Data Engineering., vol. 34, no. 9, pp. 4298-4312, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.908, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[212] M. T. Ribeiro, S. Singh, and C. Guestrin, \"why should I trust you?: Explaining the predictions of any classifier,\" in KDD, 2016, pp. 1135-1144. 8" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.056, + 0.922, + 0.079 + ], + "angle": 0, + "content": "[213] S. M. Lundberg and S. Lee, “A unified approach to interpreting model predictions,” in NIPS, 2017, pp. 4765-4774. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.08, + 0.924, + 0.102 + ], + "angle": 0, + "content": "[214] Z.-H. Zhou and J. Feng, \"Deep forest,\" National science review, vol. 6, no. 1, pp. 74-86, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.103, + 0.924, + 0.137 + ], + "angle": 0, + "content": "[215] Y. Cheng, R. Hu, H. Ying, X. Shi, J. Wu, and W. Lin, \"Arithmetic feature interaction is necessary for deep tabular learning,\" in AAAI, 2024, pp. 11516-11524. 9, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.138, + 0.924, + 0.182 + ], + "angle": 0, + "content": "[216] J. Kossen, N. Band, C. Lyle, A. N. Gomez, T. Rainforth, and Y. Gal, \"Self-attention between datapoints: Going beyond individual input-output pairs in deep learning,\" in NeurIPS, 2021, pp. 28742-28756. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.183, + 0.924, + 0.217 + ], + "angle": 0, + "content": "[217] B. 
Schäfl, L. Gruber, A. Bitto-Nemling, and S. Hochreiter, \"Hop- ular: Modern hopfield networks for tabular data,\" CoRR, vol. abs/2206.00664, 2022. 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.218, + 0.924, + 0.251 + ], + "angle": 0, + "content": "[218] H. Kim, A. Mnih, J. Schwarz, M. Garnelo, S. M. A. Eslami, D. Rosenbaum, O. Vinyals, and Y. W. Teh, \"Attentive neural processes,\" in ICLR, 2019. 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.252, + 0.924, + 0.285 + ], + "angle": 0, + "content": "[219] I. Shavitt and E. Segal, \"Regularization learning networks: deep learning for tabular datasets,\" in NeurIPS, 2018, pp. 1386-1396. 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.286, + 0.924, + 0.321 + ], + "angle": 0, + "content": "[220] V. Verma, T. Luong, K. Kawaguchi, H. Pham, and Q. V. Le, \"Towards domain-agnostic contrastive learning,\" in ICML, 2021, pp. 10530-10541. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.321, + 0.924, + 0.354 + ], + "angle": 0, + "content": "[221] C. Lee, F. Imrie, and M. van der Schaar, \"Self-supervision enhanced feature selection with correlated gates,\" in ICLR, 2022. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.355, + 0.924, + 0.39 + ], + "angle": 0, + "content": "[222] R. Levin, V. Cherepanova, A. Schwarzschild, A. Bansal, C. B. Bruss, T. Goldstein, A. G. Wilson, and M. Goldblum, \"Transfer learning with deep tabular models,\" in ICLR, 2023. 9, 13, 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.924, + 0.413 + ], + "angle": 0, + "content": "[223] K. Majmundar, S. Goyal, P. Netrapalli, and P. Jain, \"MET: masked encoding for tabular data,\" CoRR, vol. abs/2206.08564, 2022. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.414, + 0.924, + 0.447 + ], + "angle": 0, + "content": "[224] E. Hajiramezanali, N. L. Diamant, G. Scalia, and M. W. 
Shen, \"Stab: Self-supervised learning for tabular data,\" in NeurIPS Workshop, 2022. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.448, + 0.924, + 0.481 + ], + "angle": 0, + "content": "[225] S. Chen, J. Wu, N. Hovakimyan, and H. Yao, \"Recontab: Regularized contrastive representation learning for tabular data,\" CoRR, vol. abs/2310.18541, 2023. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.482, + 0.924, + 0.517 + ], + "angle": 0, + "content": "[226] W.-W. Du, W.-Y. Wang, and W.-C. Peng, \"Dora: Domain-based self-supervised learning framework for low-resource real estate appraisal,\" in CIKM, 2023, pp. 4552-4558. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.518, + 0.924, + 0.551 + ], + "angle": 0, + "content": "[227] Y. Sui, T. Wu, J. C. Cresswell, G. Wu, G. Stein, X. S. Huang, X. Zhang, and M. Volkovs, \"Self-supervised representation learning from random data projectors,\" in ICLR, 2024. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.552, + 0.924, + 0.584 + ], + "angle": 0, + "content": "[228] T. Iwata and A. Kumagai, \"Meta-learning from tasks with heterogeneous attribute spaces,\" in NeurIPS, 2020, pp. 6053-6063. 9, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.585, + 0.924, + 0.62 + ], + "angle": 0, + "content": "[229] L. Liu, M. M. Fard, and S. Zhao, \"Distribution embedding networks for generalization from a diverse set of classification tasks,\" Transactions on Machine Learning Research, 2022. 9, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.621, + 0.924, + 0.655 + ], + "angle": 0, + "content": "[230] B. Zhu, X. Shi, N. Erickson, M. Li, G. Karypis, and M. Shoaran, \"Xtab: Cross-table pretraining for tabular transformers,\" in ICML, 2023, pp. 43181-43204. 9, 12, 13, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.656, + 0.924, + 0.689 + ], + "angle": 0, + "content": "[231] Y. Zhang, K. Gong, K. Zhang, H. Li, Y. Qiao, W. Ouyang, and X. 
Yue, \"Meta-transformer: A unified framework for multimodal learning,\" CoRR, vol. abs/2307.10802, 2023. 9, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.689, + 0.924, + 0.723 + ], + "angle": 0, + "content": "[232] G. Liu, J. Yang, and L. Wu, \"Ptab: Using the pre-trained language model for modeling tabular data,\" CoRR, vol. abs/2209.08060, 2022. 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.724, + 0.924, + 0.757 + ], + "angle": 0, + "content": "[233] M. J. Kim, L. Grinsztajn, and G. Varoquaux, \"CARTE: pretraining and transfer for tabular learning,\" in ICML, 2024, pp. 23843-23866. 9, 16, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.758, + 0.924, + 0.803 + ], + "angle": 0, + "content": "[234] Z. Cheng, T. Xie, P. Shi, C. Li, R. Nadkarni, Y. Hu, C. Xiong, D. Radev, M. Ostendorf, L. Zettlemoyer, N. A. Smith, and T. Yu, \"Binding language models in symbolic languages,\" in ICLR, 2023. 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.804, + 0.924, + 0.838 + ], + "angle": 0, + "content": "[235] T. Zhang, S. Wang, S. Yan, L. Jian, and Q. Liu, \"Generative table pre-training empowers models for tabular prediction,\" in EMNLP, 2023. 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.839, + 0.924, + 0.884 + ], + "angle": 0, + "content": "[236] T. Dinh, Y. Zeng, R. Zhang, Z. Lin, M. Gira, S. Rajput, J. yong Sohn, D. S. Papailiopoulos, and K. Lee, \"LIFT: language-interfaced fine-tuning for non-language machine learning tasks,\" in NeurIPS, 2022, pp. 11763-11784. 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.924, + 0.918 + ], + "angle": 0, + "content": "[237] R. Wang, Z. Wang, and J. Sun, \"Unipredict: Large language models are universal tabular predictors,\" CoRR, vol. abs/2310.03266, 2023. 9, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.919, + 0.924, + 0.943 + ], + "angle": 0, + "content": "[238] A. Sharma, E. Vans, D. Shigemizu, K. A. Boroevich, and T. 
Tsunoda, \"Deepinsight: A methodology to transform a non-image" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.924, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "27" + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.055, + 0.493, + 0.079 + ], + "angle": 0, + "content": "data to an image for convolution neural network architecture,\" Scientific reports, vol. 9, no. 1, p. 11399, 2019. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.08, + 0.493, + 0.135 + ], + "angle": 0, + "content": "[239] O. Bazgir, R. Zhang, S. R. Dhruba, R. Rahman, S. Ghosh, and R. Pal, \"Representation of features as images with neighborhood dependencies for compatibility with convolutional neural networks,\" Nature communications, vol. 11, no. 1, p. 4391, 2020. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.136, + 0.493, + 0.171 + ], + "angle": 0, + "content": "[240] L. Buturovic and D. Miljkovic, \"A novel method for classification of tabular data using convolutional neural networks,\" BioRxiv, pp. 2020-05, 2020. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.171, + 0.493, + 0.228 + ], + "angle": 0, + "content": "[241] V. Gómez-Martínez, F. J. Lara-Abelenda, P. Peiro-Corbacho, D. Chushig-Muzo, C. Granja, and C. Soguero-Ruiz, \"LM-IGTD: a 2d image generator for low-dimensional and mixed-type tabular data to leverage the potential of convolutional neural networks,\" CoRR, vol. abs/2406.14566, 2024. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.493, + 0.263 + ], + "angle": 0, + "content": "[242] B. Sun, L. Yang, W. Zhang, M. Lin, P. Dong, C. Young, and J. 
Dong, \"Supertml: Two-dimensional word embedding for the precognition on structured tabular data,\" in CVPR Workshops, 2019. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.263, + 0.493, + 0.298 + ], + "angle": 0, + "content": "[243] Z. Wang, C. Gao, C. Xiao, and J. Sun, \"Meditab: Scaling medical tabular data predictors via data consolidation, enrichment, and refinement,\" in *IJCAI*, 2024, pp. 6062-6070. 9, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.298, + 0.493, + 0.343 + ], + "angle": 0, + "content": "[244] R. Bommasani, D. A. Hudson, E. Adeli, R. Altman, S. Arora, S. von Arx, M. S. Bernstein, J. Bohg, A. Bosselut, E. Brunskill et al., \"On the opportunities and risks of foundation models,\" CoRR, vol. abs/2108.07258, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.343, + 0.493, + 0.367 + ], + "angle": 0, + "content": "[245] J. Goldberger, G. E. Hinton, S. Roweis, and R. R. Salakhutdinov, \"Neighbourhood components analysis,\" in NIPS, vol. 17, 2004. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.367, + 0.493, + 0.447 + ], + "angle": 0, + "content": "[246] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei, \"Language models are few-shot learners,\" in NeurIPS, 2020, pp. 1877-1901. 10, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.493, + 0.481 + ], + "angle": 0, + "content": "[247] R. Tibshirani, \"Regression shrinkage and selection via the lasso,\" Journal of the Royal Statistical Society Series B: Statistical Methodology, vol. 58, no. 1, pp. 267-288, 1996. 
10" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.481, + 0.493, + 0.516 + ], + "angle": 0, + "content": "[248] A. E. Hoerl and R. W. Kennard, \"Ridge regression: Biased estimation for nonorthogonal problems,\" Technometrics, vol. 12, no. 1, pp. 55-67, 1970. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.516, + 0.493, + 0.551 + ], + "angle": 0, + "content": "[249] H. Zou and T. Hastie, “Zou h, hastie t. regularization and variable selection via the elastic net.” Journal of the Royal Statistical Society: Series B (Statistical Methodology), vol. 67, pp. 301–320, 2005. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.551, + 0.493, + 0.584 + ], + "angle": 0, + "content": "[250] J. T. Hancock and T. M. Khoshgoftaar, \"Survey on categorical data for neural networks,\" Journal of big data, vol. 7, no. 1, p. 28, 2020. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.584, + 0.493, + 0.607 + ], + "angle": 0, + "content": "[251] J. R. Quinlan, C4.5: programs for machine learning. Elsevier, 2014. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.493, + 0.629 + ], + "angle": 0, + "content": "[252] L. Breiman, \"Random forests,\" Machine learning, vol. 45, pp. 5-32, 2001. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.63, + 0.493, + 0.665 + ], + "angle": 0, + "content": "[253] Z.-H. Zhou and Y. Jiang, \"Nec4. 5: Neural ensemble based c4. 5,\" IEEE Transactions on knowledge and data engineering, vol. 16, no. 6, pp. 770-773, 2004. 12, 14, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.665, + 0.493, + 0.688 + ], + "angle": 0, + "content": "[254] T. Hastie and R. Tibshirani, \"Generalized additive models,\" Statistical science, vol. 1, no. 3, pp. 297-310, 1986. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.688, + 0.493, + 0.734 + ], + "angle": 0, + "content": "[255] R. Agarwal, L. Melnick, N. Frosst, X. Zhang, B. Lengerich, R. Caruana, and G. E. 
Hinton, \"Neural additive models: Interpretable machine learning with neural nets,\" in NeurIPS, 2021, pp. 4699-4711. 12, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.734, + 0.493, + 0.769 + ], + "angle": 0, + "content": "[256] W.-Y. Wang, W.-W. Du, D. Xu, W. Wang, and W.-C. Peng, \"A survey on self-supervised learning for non-sequential tabular data,\" Machine Learning, vol. 114, no. 1, p. 16, 2025. 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.769, + 0.493, + 0.792 + ], + "angle": 0, + "content": "[257] G. Hinton, O. Vinyals, and J. Dean, \"Distilling the knowledge in a neural network,\" CoRR, vol. abs/1503.02531, 2015. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.792, + 0.493, + 0.827 + ], + "angle": 0, + "content": "[258] S. Yun, D. Han, S. Chun, S. J. Oh, Y. Yoo, and J. Choe, \"Cutmix: Regularization strategy to train strong classifiers with localizable features,\" in ICCV, 2019, pp. 6023-6032. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.827, + 0.493, + 0.85 + ], + "angle": 0, + "content": "[259] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, \"mixup: Beyond empirical risk minimization,\" in ICLR, 2018. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.85, + 0.493, + 0.884 + ], + "angle": 0, + "content": "[260] C. Hou and Z.-H. Zhou, \"One-pass learning with incremental and decremental features,\" IEEE transactions on pattern analysis and machine intelligence, vol. 40, no. 11, pp. 2776-2792, 2017. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.884, + 0.493, + 0.918 + ], + "angle": 0, + "content": "[261] H.-J. Ye, D.-C. Zhan, Y. Jiang, and Z.-H. Zhou, \"Rectify heterogeneous models with semantic mapping,\" in ICML, 2018, pp. 5630-5639. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.918, + 0.493, + 0.943 + ], + "angle": 0, + "content": "[262] H.-J. Ye, L. Han, and D.-C. 
Zhan, \"Revisiting unsupervised meta-learning via the characteristics of few-shot tasks,\" IEEE" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.493, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.055, + 0.924, + 0.079 + ], + "angle": 0, + "content": "Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 3, pp. 3721-3737, 2022. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.08, + 0.924, + 0.125 + ], + "angle": 0, + "content": "[263] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, and V. Stoyanov, \"Roberta: A robustly optimized bert pretraining approach,\" CoRR, vol. abs/1907.11692, 2019. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.924, + 0.151 + ], + "angle": 0, + "content": "[264] F. Mahdisoltani, J. Biega, and F. M. Suchanek, \"YAGO3: A knowledge base from multilingual wikipediais,\" in CIDR, 2015. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.151, + 0.924, + 0.196 + ], + "angle": 0, + "content": "[265] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing caafe for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.197, + 0.924, + 0.233 + ], + "angle": 0, + "content": "[266] S. Han, J. Yoon, S. O. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.233, + 0.924, + 0.268 + ], + "angle": 0, + "content": "[267] J. Herzig, P. K. Nowak, T. Müller, F. Piccinno, and J. M. Eisenschlos, \"Tapas: Weakly supervised table parsing via pre-training,\" in ACL, 2020, pp. 4320-4333. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.268, + 0.924, + 0.303 + ], + "angle": 0, + "content": "[268] P. Yin, G. 
Neubig, W. tau Yih, and S. Riedel, \"Tabert: Pretraining for joint understanding of textual and tabular data,\" in ACL, 2020, pp. 8413-8426. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.304, + 0.924, + 0.339 + ], + "angle": 0, + "content": "[269] M. Chen, L. Shen, Z. Li, X. J. Wang, J. Sun, and C. Liu, \"Visions: Visual masked autoencoders are free-lunch zero-shot time series forecasters,\" CoRR, vol. abs/2408.17253, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.339, + 0.924, + 0.373 + ], + "angle": 0, + "content": "[270] Z. Li, S. Li, and X. Yan, \"Time series as images: Vision transformer for irregularly sampled time series,\" in NeurIPS, 2023, pp. 49 187-49 204. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.373, + 0.924, + 0.41 + ], + "angle": 0, + "content": "[271] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dólar, and R. B. Girshick, \"Segment anything,\" in ICCV, 2023, pp. 3992-4003. 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.41, + 0.924, + 0.433 + ], + "angle": 0, + "content": "[272] D. Ha, A. M. Dai, and Q. V. Le, \"Hypernetworks,\" in ICLR, 2017. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.434, + 0.924, + 0.469 + ], + "angle": 0, + "content": "[273] W.-L. Chao, H.-J. Ye, D.-C. Zhan, M. E. Campbell, and K. Q. Weinberger, “Revisiting meta-learning as supervised learning,” CoRR, vol. abs/2002.00573, 2020. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.469, + 0.924, + 0.494 + ], + "angle": 0, + "content": "[274] J. Peters, D. Janzing, and B. Scholkopf, Elements of causal inference: foundations and learning algorithms. The MIT Press, 2017. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.494, + 0.924, + 0.517 + ], + "angle": 0, + "content": "[275] R. Neal, Bayesian Learning for Neural Networks, ser. Incs. springer, 1996. 
18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.518, + 0.924, + 0.552 + ], + "angle": 0, + "content": "[276] S. Müller, N. Hollmann, S. Pineda-Arango, J. Grabocka, and F. Hutter, \"Transformers can do bayesian inference,\" in ICLR, 2022. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.553, + 0.924, + 0.588 + ], + "angle": 0, + "content": "[277] H.-J. Ye, S.-Y. Liu, and W.-L. Chao, \"A closer look at tabpfn v2: Strength, limitation, and extension,\" CoRR, vol. abs/2502.17361, 2025. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.589, + 0.924, + 0.624 + ], + "angle": 0, + "content": "[278] T. Iwata and A. Kumagai, \"Meta-learning of semi-supervised learning from tasks with heterogeneous attribute spaces,\" CoRR, vol. abs/2311.05088, 2023. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.625, + 0.924, + 0.66 + ], + "angle": 0, + "content": "[279] T. Nagler, \"Statistical foundations of prior-data fitted networks,\" in ICML, A. Krause, E. Brunskill, K. Cho, B. Engelhardt, S. Sabato, and J. Scarlett, Eds., 2023, pp. 25660-25676. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.66, + 0.924, + 0.694 + ], + "angle": 0, + "content": "[280] J. Ma, A. Dankar, G. Stein, G. Yu, and A. L. Caterini, \"Tabpfgen - tabular data generation with tabpfn,\" CoRR, vol. abs/2406.05216, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.924, + 0.742 + ], + "angle": 0, + "content": "[281] S. Ruiz-Villafranca, J. R. Gómez, J. M. C. Gómez, J. C. Mondéjar, and J. L. Martínez, \"A tabpfn-based intrusion detection system for the industrial internet of things,\" The Journal of Supercomputing, vol. 80, no. 14, pp. 20080-20117, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.742, + 0.924, + 0.788 + ], + "angle": 0, + "content": "[282] A. Margeloiu, A. Bazaga, N. Simidjievski, P. Lio, and M. 
Jamnik, \"Tabmda: Tabular manifold data augmentation for any classifier using transformers with in-context subsetting,\" CoRR, vol. abs/2406.01805, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.789, + 0.924, + 0.835 + ], + "angle": 0, + "content": "[283] S. B. Hoo, S. Müller, D. Salinas, and F. Hutter, \"The tabular foundation model tabpfn outperforms specialized time series forecasting models based on simple features,\" CoRR, vol. abs/2501.02945, 2025. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.835, + 0.924, + 0.87 + ], + "angle": 0, + "content": "[284] F. den Breejen, S. Bae, S. Cha, and S.-Y. Yun, \"Fine-tuned in-context learning transformers are excellent tabular data classifiers,\" CoRR, vol. abs/2405.13396v2, 2025. 18, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.871, + 0.924, + 0.906 + ], + "angle": 0, + "content": "[285] Y. Wu and D. L. Bergman, \"Zero-shot meta-learning for tabular prediction tasks with adversarially pre-trained transformer,\" CoRR, vol. abs/2502.04573, 2025. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.906, + 0.924, + 0.942 + ], + "angle": 0, + "content": "[286] J. Qu, D. Holzmüller, G. Varoquaux, and M. L. Morvan, \"Tabicl: A tabular foundation model for in-context learning on large data,\" CoRR, vol. abs/2502.05564, 2025. 18, 19" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.055, + 0.924, + 0.942 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.076, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "28" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.056, + 0.492, + 0.09 + ], + "angle": 0, + "content": "[287] B. Feuer, C. Hegde, and N. 
Cohen, \"Scaling tabpfn: Sketching and feature selection for tabular prior-data fitted networks,\" CoRR, vol. abs/2311.10609, 2023. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.492, + 0.113 + ], + "angle": 0, + "content": "[288] J. Ma, V. Thomas, G. Yu, and A. L. Caterini, \"In-context data distillation with tabpfn,\" CoRR, vol. abs/2402.06971, 2024. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.114, + 0.492, + 0.16 + ], + "angle": 0, + "content": "[289] B. Feuer, R. T. Schirrmeister, V. Cherepanova, C. Hegde, F. Hutter, M. Goldblum, N. Cohen, and C. White, \"Tunetables: Context optimization for scalable prior-data fitted networks,\" in NeurIPS, 2024, pp. 83430-83464. 18, 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.16, + 0.492, + 0.194 + ], + "angle": 0, + "content": "[290] D. Xu, O. Cirit, R. Asadi, Y. Sun, and W. Wang, \"Mixture of in-context prompters for tabular pfns,\" CoRR, vol. abs/2405.16156, 2024. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.195, + 0.492, + 0.229 + ], + "angle": 0, + "content": "[291] M. Koshil, T. Nagler, M. Feurer, and K. Eggensperger, \"Towards localization via data embedding for tabPFN,\" in NeurIPS Workshop, 2024. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.23, + 0.492, + 0.264 + ], + "angle": 0, + "content": "[292] Y. Zeng, W. Kang, and A. C. Mueller, \"Tabflex: Scaling tabular learning to millions with linear attention,\" in NeurIPS Workshop, 2024. 19, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.265, + 0.492, + 0.299 + ], + "angle": 0, + "content": "[293] S. K. Baur and S. Kim, “Exploration of autoregressive models for in-context learning on tabular data,” in NeurIPS Workshop, 2024. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.3, + 0.492, + 0.334 + ], + "angle": 0, + "content": "[294] M. Arbel, D. Salinas, and F. Hutter, \"Equitabpfn: A target-permutation equivariant prior fitted networks,\" CoRR, vol. 
abs/2502.06684, 2025. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.335, + 0.492, + 0.369 + ], + "angle": 0, + "content": "[295] Y. Sun, X. Wen, S. Zheng, X. Jia, and J. Bian, \"Scaling generative tabular learning for large language models,\" in NeurIPS Workshop, 2024. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.37, + 0.492, + 0.394 + ], + "angle": 0, + "content": "[296] Y. Freund, R. E. Schapire et al., \"Experiments with a new boosting algorithm,\" in ICML, vol. 96, 1996, pp. 148-156. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.394, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[297] Z.-H. Zhou, Ensemble methods: foundations and algorithms. CRC press, 2012. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.451 + ], + "angle": 0, + "content": "[298] Y. Wen, D. Tran, and J. Ba, \"Batchsemble: an alternative approach to efficient ensemble and lifelong learning,\" in ICLR, 2020. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.452, + 0.492, + 0.498 + ], + "angle": 0, + "content": "[299] M. Jayawardhana, Renbo, S. Dooley, V. Cherepanova, A. G. Wilson, F. Hutter, C. White, T. Goldstein, and M. Goldblum, \"Transformers boost the performance of decision trees on tabular data across sample sizes,\" CoRR, vol. abs/2502.02672v2, 2025. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.499, + 0.492, + 0.522 + ], + "angle": 0, + "content": "[300] R. Caruana, A. Munson, and A. Niculescu-Mizil, “Getting the most out of ensemble selection,” in ICDM, 2006, pp. 828-833. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.522, + 0.492, + 0.556 + ], + "angle": 0, + "content": "[301] Y. Wang, B. Jiang, Y. Guo, Q. Gan, D. Wipf, X. Huang, and X. Qiu, \"Prior-fitted networks scale to larger datasets when treated as weak learners,\" CoRR, vol. abs/2503.01256, 2025. 
20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.557, + 0.492, + 0.58 + ], + "angle": 0, + "content": "[302] J. C. Gower, \"A general coefficient of similarity and some of its properties,\" Biometrics, pp. 857-871, 1971. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.581, + 0.492, + 0.603 + ], + "angle": 0, + "content": "[303] F. T. Liu, K. M. Ting, and Z.-H. Zhou, \"Isolation forest,\" in ICDM, 2008, pp. 413-422. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.604, + 0.492, + 0.638 + ], + "angle": 0, + "content": "[304] M. M. Breunig, H.-P. Kriegel, R. T. Ng, and J. Sander, “Lof: identifying density-based local outliers,” in SIGMOD, 2000, pp. 93-104. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.639, + 0.492, + 0.662 + ], + "angle": 0, + "content": "[305] T. Shenkar and L. Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.663, + 0.492, + 0.696 + ], + "angle": 0, + "content": "[306] A. Li, Y. Zhao, C. Qiu, M. Kloft, P. Smyth, M. Rudolph, and S. Mandt, \"Anomaly detection of tabular data using llms,\" CoRR, vol. abs/2406.16308, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.697, + 0.492, + 0.732 + ], + "angle": 0, + "content": "[307] C. Lee, J. Kim, and N. Park, \"Codi: Co-evolving contrastive diffusion models for mixed-type tabular synthesis,\" in ICML, 2023, pp. 18940-18956. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.733, + 0.492, + 0.766 + ], + "angle": 0, + "content": "[308] R. Tu, Z. Senane, L. Cao, C. Zhang, H. Kjellström, and G. E. Henter, \"Causality for tabular data synthesis: A high-order structure causal benchmark framework,\" CoRR, vol. abs/2406.08311, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.767, + 0.492, + 0.8 + ], + "angle": 0, + "content": "[309] R. Feinman and B. M. 
Lake, \"Generating new concepts with hybrid neuro-symbolic models,\" CoRR, vol. abs/2003.08978, 2020. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.801, + 0.492, + 0.825 + ], + "angle": 0, + "content": "[310] T. Hastie, “The elements of statistical learning: data mining, inference, and prediction,” 2009. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.826, + 0.492, + 0.849 + ], + "angle": 0, + "content": "[311] B. M. Greenwell et al., \"pdp: An r package for constructing partial dependence plots,\" R Journal, vol. 9, no. 1, p. 421, 2017. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.849, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[312] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, C.-S. Chen, and D. T.-H. Chang, \"Dofen: Deep oblivious forest ensemble,\" in NeurIPS, 2024, pp. 44624-44677. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.884, + 0.492, + 0.917 + ], + "angle": 0, + "content": "[313] B. Sun and K. Saenko, \"Deep CORAL: correlation alignment for deep domain adaptation,\" in ECCV Workshops (3), 2016, pp. 443-450. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.918, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[314] C. Kim, T. Kim, S. Woo, J. Y. Yang, and E. Yang, \"Adaptable: Test-time adaptation for tabular data via shift-aware uncertainty cali" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.056, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.056, + 0.924, + 0.078 + ], + "angle": 0, + "content": "brator and label distribution handler,\" CoRR, vol. abs/2407.10784, 2024. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.08, + 0.925, + 0.126 + ], + "angle": 0, + "content": "[315] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. S. Lempitsky, \"Domain-adversarial training of neural networks,\" J. Mach. Learn. Res., vol. 17, pp. 59:1-59:35, 2016. 
20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.925, + 0.15 + ], + "angle": 0, + "content": "[316] S. Sagawa, P. W. Koh, T. B. Hashimoto, and P. Liang, \"Distribu-tionally robust neural networks,\" in ICLR, 2020. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.151, + 0.925, + 0.186 + ], + "angle": 0, + "content": "[317] D. Levy, Y. Carmon, J. C. Duchi, and A. Sidford, \"Large-scale methods for distributionally robust optimization,\" in NeurIPS, 2020, pp. 8847-8860. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.187, + 0.925, + 0.221 + ], + "angle": 0, + "content": "[318] J. Zhang, A. K. Menon, A. Veit, S. Bhojanapalli, S. Kumar, and S. Sra, \"Coping with label shift via distributionally robust optimisation,\" in ICLR, 2021. 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.222, + 0.925, + 0.255 + ], + "angle": 0, + "content": "[319] H.-R. Cai and H.-J. Ye, \"Understanding the limits of deep tabular methods with temporal shift,\" CoRR, vol. abs/2502.20260, 2025. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.257, + 0.925, + 0.293 + ], + "angle": 0, + "content": "[320] W. Huang, \"Multimodal contrastive learning and tabular attention for automated alzheimers disease prediction,\" in ICCV (Workshops), 2023, pp. 2465-2474. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.294, + 0.925, + 0.328 + ], + "angle": 0, + "content": "[321] S. Du, S. Zheng, Y. Wang, W. Bai, D. P. O'Regan, and C. Qin, \"Tip: Tabular-image pre-training for multimodal classification with incomplete data,\" in ECCV, 2024, pp. 478-496. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.329, + 0.925, + 0.353 + ], + "angle": 0, + "content": "[322] A. Gilani, S. R. Qasim, I. Malik, and F. Shafait, \"Table detection using deep learning,\" in ICDAR, 2017, pp. 771-776. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.354, + 0.925, + 0.388 + ], + "angle": 0, + "content": "[323] M. Li, L. Cui, S. Huang, F. 
Wei, M. Zhou, and Z. Li, \"Tablebank: Table benchmark for image-based table detection and recognition,\" in LREC, 2020, pp. 1918-1925. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.925, + 0.424 + ], + "angle": 0, + "content": "[324] S. Schreiber, S. Agne, I. Wolf, A. Dengel, and S. Ahmed, \"Deepdesrt: Deep learning for detection and structure recognition of tables in document images,\" in ICDAR, 2017, pp. 1162-1167. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.425, + 0.925, + 0.48 + ], + "angle": 0, + "content": "[325] M. s. Kasem, A. Abdallah, A. Berendeyev, E. Elkady, M. Mahmoud, M. Abdalla, M. Hamada, S. Vascon, D. Nurseitov, and I. Taj-eddin, \"Deep learning for table detection and structure recognition: A survey,\" ACM Computing Surveys, vol. 56, no. 12, pp. 1-41, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.482, + 0.925, + 0.517 + ], + "angle": 0, + "content": "[326] W. Chen, M.-W. Chang, E. Schlinger, W. Wang, and W. W. Cohen, \"Open question answering over tables and text,\" CoRR, vol. abs/2010.10439, 2020. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.518, + 0.925, + 0.564 + ], + "angle": 0, + "content": "[327] A. Talmor, O. Yoran, A. Catav, D. Lahav, Y. Wang, A. Asai, G. Ilharco, H. Hajishirzi, and J. Berant, \"Multimodalqa: Complex question answering over text, tables and images,\" CoRR, vol. abs/2104.06039, 2021. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.927, + 0.6 + ], + "angle": 0, + "content": "[328] S. Appalaraju, B. Jasani, B. U. Kota, Y. Xie, and R. Manmatha, \"Docformer: End-to-end transformer for document understanding,\" in ICCV, 2021, pp. 993-1003. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.601, + 0.925, + 0.636 + ], + "angle": 0, + "content": "[329] C. Da, P. Wang, and C. Yao, \"Multi-granularity prediction with learnable fusion for scene text recognition,\" CoRR, vol. abs/2307.13244, 2023. 
21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.925, + 0.682 + ], + "angle": 0, + "content": "[330] Z. Gu, C. Meng, K. Wang, J. Lan, W. Wang, M. Gu, and L. Zhang, \"Xylayoutlm: Towards layout-aware multimodal networks for visually-rich document understanding,\" in CVPR, 2022, pp. 4583-4592. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.684, + 0.925, + 0.718 + ], + "angle": 0, + "content": "[331] A. Nassar, N. Livathinos, M. Lysak, and P. Staar, \"Tableformer: Table structure understanding with transformers,\" in CVPR, 2022, pp. 4614-4623. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.719, + 0.925, + 0.754 + ], + "angle": 0, + "content": "[332] G. Kim, T. Hong, M. Yim, J. Park, J. Yim, W. Hwang, S. Yun, D. Han, and S. Park, \"Donut: Document understanding transformer withoutOCR,\" CoRR, vol. abs/2111.15664, 2021. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.755, + 0.925, + 0.8 + ], + "angle": 0, + "content": "[333] H. Feng, Z. Wang, J. Tang, J. Lu, W. Zhou, H. Li, and C. Huang, \"Unidoc: A universal large multimodal model for simultaneous text detection, recognition, spotting and understanding,\" CoRR, vol. abs/2308.11592, 2023. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.801, + 0.925, + 0.849 + ], + "angle": 0, + "content": "[334] J. Wan, S. Song, W. Yu, Y. Liu, W. Cheng, F. Huang, X. Bai, C. Yao, and Z. Yang, \"Omniparser: A unified framework for text spotting key information extraction and table recognition,\" in CVPR, 2024, pp. 15641-15653. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.849, + 0.925, + 0.895 + ], + "angle": 0, + "content": "[335] W. Zhao, H. Feng, Q. Liu, J. Tang, S. Wei, B. Wu, L. Liao, Y. Ye, H. Liu, W. Zhou et al., \"Tabpedia: Towards comprehensive visual table understanding with concept synergy,\" CoRR, vol. abs/2406.01326, 2024. 
21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.896, + 0.925, + 0.941 + ], + "angle": 0, + "content": "[336] Z. Li, B. Yang, Q. Liu, Z. Ma, S. Zhang, J. Yang, Y. Sun, Y. Liu, and X. Bai, \"Monkey: Image resolution and text label are important things for large multi-modal models,\" in CVPR, 2024, pp. 26763-26773. 21" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.927, + 0.941 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.411, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "29" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.055, + 0.494, + 0.09 + ], + "angle": 0, + "content": "[337] Y. Liu, B. Yang, Q. Liu, Z. Li, Z. Ma, S. Zhang, and X. Bai, \"Textmonkey: AnOCR-free large multimodal model for understanding document,\" CoRR, vol. abs/2403.04473, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.091, + 0.494, + 0.135 + ], + "angle": 0, + "content": "[338] J. Ye, A. Hu, H. Xu, Q. Ye, M. Yan, Y. Dan, C. Zhao, G. Xu, C. Li, J. Tian et al., \"mplug-docowl: Modularized multimodal large language model for document understanding,\" CoRR, vol. abs/2307.02499, 2023. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.136, + 0.494, + 0.182 + ], + "angle": 0, + "content": "[339] N. Deng, Z. Sun, R. He, A. Sikka, Y. Chen, L. Ma, Y. Zhang, and R. Mihalcea, \"Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data,\" CoRR, vol. abs/2402.12424, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.182, + 0.494, + 0.204 + ], + "angle": 0, + "content": "[340] Z.-H. Zhou, \"Open-environment machine learning,\" National Science Review, vol. 9, no. 8, p. nwac123, 07 2022. 
21" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.205, + 0.494, + 0.239 + ], + "angle": 0, + "content": "[341] W. Ren, X. Li, H. Chen, V. Rakesh, Z. Wang, M. Das, and V. G. Honavar, \"Tablog: Test-time adaptation for tabular data using logic rules,\" in ICML, 2024, pp. 42417-42427. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.239, + 0.494, + 0.252 + ], + "angle": 0, + "content": "[342] J. Kaplan, S. McCandlish, T. Henighan, T. B. Brown, B. Chess," + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.055, + 0.494, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.055, + 0.925, + 0.09 + ], + "angle": 0, + "content": "R. Child, S. Gray, A. Radford, J. Wu, and D. Amodei, \"Scaling laws for neural language models,\" CoRR, vol. abs/2001.08361, 2020. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.926, + 0.124 + ], + "angle": 0, + "content": "[343] Z.-H. Zhou, \"Learnware: on the future of machine learning,\" Frontiers of Computer Science, vol. 10, no. 4, pp. 589-590, 2016. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.125, + 0.926, + 0.147 + ], + "angle": 0, + "content": "[344] Z.-H. Zhou and Z.-H. Tan, \"Learnware: small models do big,\" Science China Information Science, vol. 67, no. 1, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.148, + 0.926, + 0.181 + ], + "angle": 0, + "content": "[345] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.182, + 0.926, + 0.216 + ], + "angle": 0, + "content": "[346] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 06 2024. 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.217, + 0.926, + 0.251 + ], + "angle": 0, + "content": "[347] W. Liang, Y. 
Zhang, Y. Kwon, S. Yeung, and J. Y. Zou, \"Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning,\" in NeurIPS, 2022. 22" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.055, + 0.926, + 0.251 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_origin.pdf b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b6b039b264165ee0cddb793c46a1a8534d10d493 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/5dd2ae1b-7385-4fdc-ac7c-1944262b4bd7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60de11b5d79836a8f04f12f2d1a67e92d98b096df9ff98e5c742dbd78a2484f5 +size 1862117 diff --git a/data/2025/2504_16xxx/2504.16109/full.md b/data/2025/2504_16xxx/2504.16109/full.md new file mode 100644 index 0000000000000000000000000000000000000000..edc5772f5b7288bfaab583d33ccbe62effe828f7 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/full.md @@ -0,0 +1,965 @@ +# Representation Learning for Tabular Data: A Comprehensive Survey + +Jun-Peng Jiang, Si-Yang Liu, Hao-Run Cai, Qile Zhou, Han-Jia Ye + +Abstract—Tabular data, structured as rows and columns, is among the most prevalent data types in machine learning classification and regression applications. Models for learning from tabular data have continuously evolved, with Deep Neural Networks (DNNs) recently demonstrating promising results through their capability of representation learning. In this survey, we systematically introduce the field of tabular representation learning, covering the background, challenges, and benchmarks, along with the pros and cons of using DNNs. We organize existing methods into three main categories according to their generalization capabilities: specialized, transferable, and general models. 
Specialized models focus on tasks where training and evaluation occur within the same data distribution. We introduce a hierarchical taxonomy for specialized models based on the key aspects of tabular data—features, samples, and objectives—and delve into detailed strategies for obtaining high-quality feature- and sample-level representations. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks, leveraging knowledge acquired from homogeneous or heterogeneous sources, or even cross-modalities such as vision and language. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning. We group these general models based on the strategies used to adapt across heterogeneous datasets. Additionally, we explore ensemble methods, which integrate the strengths of multiple tabular models. Finally, we discuss representative extensions of tabular learning, including open-environment tabular machine learning, multimodal learning with tabular data, and tabular understanding tasks. More information can be found in the following repository: https://github.com/LAMDA-Tabular/Tabular-Survey. + +Index Terms—Tabular Data, Representation Learning, Deep Tabular Learning, Tabular Machine Learning, Tabular Foundation Model + +![](images/f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg) + +# 1 INTRODUCTION + +Tabular data, characterized by structured rows and columns, is one of the most prevalent data formats in real-world machine learning applications, spanning diverse domains such as finance [1], healthcare [2], education [3], recommendation systems [4], and scientific research. In particular, AI for scientific research (AI4science) has increasingly relied on tabular data, as numerous prominent datasets—such as those from genomics [5], chemistry [6], and climate science [7], [8]—naturally adopt tabular forms. 
+ +Tabular data inherently organizes information in a structured, table-like format. In this survey, we focus primarily on supervised tabular machine learning tasks, specifically classification and regression. Beyond their structured organization, tabular datasets frequently include heterogeneous attributes [9], encompassing numerical, categorical, or mixed data types that may be either dense or sparse. Additionally, many tabular datasets present quality challenges, such as noisy measurements, missing values, outliers, inaccuracies [10], and privacy constraints [11], all of which complicate the modeling process. The most common supervised tabular tasks are classification and regression, where the goal is to learn mappings from training data to discrete or continuous targets, respectively. As illustrated in Figure 1, each row represents an instance (with its corresponding label), while each column corresponds to a specific attribute or feature [12]. + +![](images/e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg) +Figure 1: A brief introduction to tabular data and associated learning tasks. Each row represents an instance and each column corresponds to a specific attribute or feature, which can be numerical or categorical. The most common tabular machine learning tasks are classification and regression as shown in the right side of the figure. + +Ideally, learned mappings should generalize effectively, accurately predicting outcomes for new instances drawn from the same underlying distribution. + +Machine learning methods for tabular data have evolved significantly over the years [13], [14], [15], [16]. Recently, the rise of deep learning has profoundly impacted domains like computer vision [17] and natural language processing [18], where Deep Neural Networks (DNNs) extract semantic representations directly from raw inputs [19], [20], [21]. 
These learned representations have not only improved generalization but have also facilitated knowledge transfer across related tasks [22]. The flexibility of DNNs in modeling complex feature interactions and learning rich hierarchical + +![](images/0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg) +Figure 2: We organize existing tabular classification/regression methods into three categories according to their generalization capabilities: specialized (left), transferable (middle), and general (right) models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning. + +structures has inspired significant interest in adapting deep learning techniques to tabular data. + +Indeed, DNNs were applied to tabular data decades ago, initially targeting dimensionality reduction and visualization tasks [23], [24], [25], [26], yet they typically struggled to match tree-based methods on standard classification and regression problems. Later advances in DNNs have led to significant improvements across various tabular-related applications, such as click-through rate prediction [27], [28], anomaly detection [29], recommendation systems [30], and time series forecasting [31], [32]. Modern deep learning approaches, benefiting from better-designed architectures, optimized training strategies, high-quality representations, have revitalized DNN performance on tabular data, often rivaling or surpassing traditional tree-based models [33], [34], [35]. 
Given the wide variety of approaches emerging in deep tabular modeling, a systematic overview that revisits critical factors and current methodologies in representation learning for tabular data has become increasingly necessary. + +This survey begins by introducing the background of tabular data learning, highlighting the challenges involved and critically examining the advantages and limitations of utilizing DNNs compared to classical—particularly tree-based—methods [36], [37], [38], [39]. Given the observed instability of method performance across different tabular datasets, we also discuss comprehensive strategies for dataset collection, evaluation, and analysis, aiming to establish robust criteria for aggregating performance metrics across multiple datasets [40], [41], [42], [43]. + +We broadly categorize deep tabular methods into three types: specialized methods, transferable methods, and general methods, distinguished by the scope of datasets on which they are trained and deployed, as well as their corresponding + +generalization capabilities (illustrated in Figure 2). Specialized tabular methods align closely with classical supervised models, typically trained and evaluated on data drawn from the same distribution. In contrast, transferable methods leverage knowledge from models pre-trained on one or multiple source datasets, subsequently fine-tuning these models on target datasets; the primary challenge here lies in addressing the heterogeneity between pre-trained sources and target tasks. The recently proposed general tabular methods—motivated by the remarkable "zero-shot" generalization abilities demonstrated by large language models (LLMs)—exhibit exceptional versatility. These general models can directly apply their learned representations to downstream tabular datasets without additional fine-tuning, achieving robust generalization due to advanced pre-training strategies. 
Although the generalization ability tends to increase from specialized to general models, it does not imply that specialized or transferable methods are less valuable; specialized models remain superior on large-scale datasets, and fine-tuning general models can further improve their predictive performance. Additionally, the first two types of methods provide foundational insights and valuable components that contribute significantly to advancements in general tabular models. + +For specialized methods, numerous designs have been proposed from diverse perspectives, and previous papers have often categorized these methods based primarily on their architectural characteristics or behaviors. Existing taxonomies [44], for example, group specialized methods into feature-preprocessing-based [33], [45], data-augmentation-based [46], [47], [48], [49], MLP variants [50], [34], specialized DNN architectures [51], [52], [53], [54], [55], [56], [57], [58], tree-mimic approaches [59], [60], [61], token-based tech + +niques [62], [63], [33], [64], [65], regularization-driven methods [66], [67], and neighborhood-based strategies [68], [69], [35]. However, such categorizations can appear scattered, making it difficult to connect the core ideas between methods placed in distinct groups. In contrast, this survey introduces a hierarchical taxonomy based on the key aspects of tabular data—features, samples, and objectives—providing a cohesive organizational framework. Our approach emphasizes detailed strategies for obtaining high-quality representations at both feature- and sample-levels. This unified perspective helps bridge core ideas across diverse methods, facilitating clearer comparative discussions and potentially guiding the design of future, more advanced tabular models. 
+ +Instead of training a model from scratch on a single tabular dataset, transferable models leverage knowledge encoded in a pre-trained model from another dataset, which can significantly enhance the training process, especially when data or computational resources for the target task are limited. A major challenge in transferring knowledge across tabular tasks lies in the inherent heterogeneity between the source and target datasets, particularly differences in their feature and label spaces. In this survey, we adopt a broad perspective on transferable tabular models, categorizing methods based on the sources of their pre-trained knowledge. Specifically, we discuss models pre-trained on homogeneous tabular domains, such as self-supervised methods with additional pre-training steps on the target dataset itself [70], [71]; models pre-trained across heterogeneous tabular domains [72], [73], [64]; and methods transferring knowledge from other modalities, such as vision-based pre-trained models [74], [75], [76]. Additionally, since incorporating attribute semantics (when available) is a common strategy for bridging heterogeneous attribute spaces across tabular datasets [77], [78], [79], we also explore approaches leveraging language models in the final category. In particular, we further organize these language model-based strategies according to the methods they use to extract knowledge and the types of language models involved—ranging from small-scale language models to Large Language Models (LLMs) [80], [81], [82], [83]. + +Inspired by recent advancements in foundation models from vision and language domains [84], [85], general models—also known as tabular foundation models—expand the concept of transferable tabular models by enabling direct application to downstream tasks without additional fine-tuning. This capability, commonly referred to as the model's "zero-shot" ability, significantly enhances the model's usability across diverse tabular datasets. 
In contrast to transferable models, which primarily focus on bridging knowledge gaps between source and target datasets, general models aim to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. We categorize these general models based on the strategies used to achieve adaptiveness across diverse tabular tasks, specifically examining adaptations from both data-centric [86] and model-centric perspectives [87], [88]. Furthermore, we discuss critical branches of general tabular models in detail: the TabPFN variants leveraging in-context learning [89], [90], [91], and methods utilizing attribute and task semantics to unify heterogeneous tasks within a common representation framework [92], [93], [94].
+
+Additionally, ensemble methods [95], [96], [91] are introduced, which improve the generalization ability based on the strengths of multiple tabular models. Finally, we briefly overview other relevant extensions of tabular learning, including clustering [97], [98], anomaly detection [99], [100], [101], data generation and imputation [102], [103], [104], interpretability [63], [105], [61], multimodal learning [106], [107], open-environment tabular machine learning [108], [109], [110], [111], and tabular understanding [112], [113]. By summarizing the state of the field and identifying open challenges, we aim to guide future research and applications in tabular data representation learning.
+
+# 2 BACKGROUND
+
+This section presents the (supervised) tabular machine learning task, including the notation of tabular data learning, the history of tabular data, the challenges of learning from tabular data, evaluation metrics, and tabular benchmarks.
+
+# 2.1 Learning with Tabular Data
+
+A supervised tabular dataset is formatted as $N$ examples and $d$ features/attributes corresponding to $N$ rows and $d$ columns in the table. An instance $\pmb{x}_i\in \mathbb{R}^d$ is depicted by its $d$ feature values. 
Assume $x_{i,j}$ as the $j$ -th feature of instance $\pmb{x}_i$ , it could be a numerical (continuous) one $x_{i,j}^{\mathrm{num}}\in \mathbb{R}$ , like the temperature of a region or the density of the object. $\pmb{x}_i$ can also be a categorical (discrete) value $x_{i,j}^{\mathrm{cat}}$ , like one of multiple colors, the location of a person, or even some textual descriptions of the instance. Each instance is associated with a label $y_i$ , where $y_i\in \{1, - 1\}$ in a binary classification task, $y_i\in [C] = \{1,\dots ,C\}$ in a multi-class classification task, and $y_i\in \mathbb{R}$ in a regression task. + +Remark 1. Ordinal regression [114], [115], also called ordinal classification, is a type of regression analysis used to predict an ordinal variable. It can be considered an intermediate problem between regression and classification. However, this survey primarily focuses on standard classification and regression tasks and does not specifically discuss ordinal regression. + +Given a tabular dataset $\mathcal{D} = \{(x_i, y_i)\}_{i=1}^N$ , we aim to learn a mapping $f$ on $\mathcal{D}$ that maps $x_i$ to its label $y_i$ . In other words, the model predicts $x_i$ with $\hat{y}_i = f(x_i)$ . The general objective learning $f$ follows the structural risk minimization: + +$$ +\min _ {f} \sum_ {\left(\boldsymbol {x} _ {i}, y _ {i}\right) \in \mathcal {D}} \ell (y, \hat {y} _ {i} = f \left(\boldsymbol {x} _ {i}\right)) + \Omega (f). \tag {1} +$$ + +$\ell (\cdot ,\cdot)$ measures the discrepancy between the predicted label $\hat{y}_i$ and the true label $y_{i},e.g.$ , cross-entropy in classification and mean square error in regression. $\Omega (\cdot)$ is the regularization on the model, which restricts the complexity of $f$ . We expect the learned $f$ is able to extend its ability to unseen instances sampled from the same distribution as $\mathcal{D}$ . + +Tabular methods differ in their strategies to implement $f$ . 
The "dummy" approach makes predictions based on training labels $\{y_i\}_{i=1}^N$ directly, which outputs the major class in the training set for classification and the average of all labels for regression, respectively. + +In a $C$ -class classification task, classical parametric methods implement $f$ with a linear mapping, i.e., $f(\pmb{x}_i) = \pmb{W}^\top \pmb{x}_i + \pmb{b}$ , where the classifier $\pmb{W} \in \mathbb{R}^{d \times C}$ and $\pmb{b} \in \mathbb{R}^C$ + +is the bias. With different loss functions, we can implement Logistic Regression, SVM, or even AdaBoost. In contrast, non-parametric methods implement the prediction via $f(\pmb{x}_i) = f(\pmb{x}_i, \mathcal{D})$ , depending on the whole training set. For example, KNN searches neighbors in the training set $\mathcal{D}$ with the $K$ smallest distance w.r.t. $\pmb{x}_i$ . KNN can be viewed as a specific label smoother, with a dynamic local region for every instance. [116] links KNN and Random Forest from their ways of smoothing training labels in their predictions. + +Deep tabular methods implement $f$ with a deep neural network, e.g. Most deep learning models could be decomposed into two parts, i.e., $f(\pmb{x}_i) = \pmb{W}^\top \phi(\pmb{x}_i) + \pmb{b}$ . Similar to the linear model, $\pmb{W}$ and $\pmb{b}$ are the components of linear classifier, with $\pmb{W} \in \mathbb{R}^{d' \times C}$ . $\phi$ maps the input vector $\pmb{x}_i$ into the $d'$ dimension space, which extracts semantic embeddings for the given tabular input. $\phi$ could be implemented with MLP or residual network. + +# 2.2 History of Tabular Data + +Historically, classical machine learning tasks were predominantly formulated with tabular data, or datasets readily transformed into a tabular representation without explicitly designating them as "tabular." In early literature, the term "tabular" typically referred to tables within relational databases [117], CSV files on the web [118], or tables in documents [119]. 
Relevant tasks included table extraction [120], parsing [121], understanding [122], and discovering association rules [123]. With the expansion of machine learning applications into other modalities such as images, texts, audio, and video, the classical vector-based data representations have come to be explicitly termed "tabular data." + +Early statistical approaches such as linear regression, logistic regression, linear discriminant analysis, and K-Nearest Neighbors (KNN) predate artificial intelligence. Classical learning methods further expanded across various paradigms, including decision trees [124], [125], multi-layer perceptrons (MLPs), support vector machines (SVMs), and nearest centroid classifiers [5], [14]. Ensemble methods enhanced predictive performance by aggregating outputs from multiple base learners [126], [127]. More recently, gradient boosting frameworks [128], [129], such as XGBoost [130], LightGBM [131], and CatBoost [132], have become prominent due to their effectiveness and efficiency in tabular data applications and competitions [133], [134]. + +With the development of deep learning, DNNs were applied to tabular classification and regression tasks decades ago, utilizing architectures such as stacked Restricted Boltzmann Machines and denoising autoencoders [135], [136], [137]. Early representation learning efforts primarily focused on dimensionality reduction and data visualization tasks [23], [24], [25], [26], yet these models struggled to surpass traditional tree-based methods in terms of generalization. However, advancements in neural network architectures and representation learning strategies have recently led to promising results in related tabular domains, including click-through rate prediction [27], [28], anomaly detection [138], [29], recommendation systems [139], [30], and time series forecasting [31], [140], [32], [141]. 
Innovations such as convolutional layers and learnable feature embeddings have improved the ability of deep models to capture high-order + +attribute relationships [142], [143]. While early deep tabular methods lagged behind ensemble tree-based models, recent techniques have demonstrated competitive or superior performance [33], [34], [35], affirming deep representation learning as a promising direction for tabular data modeling. + +While several survey papers have been published [9], [144], the field of tabular data has witnessed remarkable progress over the past two years. On one hand, the emergence of new specialized methods has introduced significant shifts in the landscape, motivating the need for our comprehensive taxonomy. On the other hand, the rise of transferable and general approaches has greatly enhanced the generality and applicability of tabular data modeling, which has been overlooked in previous works. + +# 2.3 Challenges of Learning from Tabular Data + +Different from other types of data sources, e.g., images and texts, there exist several challenges dealing with tabular datasets due to their characteristics. + +Heterogeneity of Features. Unlike continuous image data or token-based textual data, tabular datasets often contain both numerical and categorical attributes, each requiring distinct handling methods [9], [145]. Numerical features frequently exhibit varying ranges and distributions, necessitating normalization or scaling. Categorical features differ in cardinality and semantic interpretation, requiring encoding methods like one-hot vectors or embeddings. Consequently, tabular models must carefully handle these mixed data types to preserve the usability of each feature. + +Lack of Spatial Relationships. Tabular data inherently lacks spatial or sequential relationships that are naturally found in other modalities [74], [50]. The order of columns has no semantic or spatial meaning, making tabular data permutation-invariant regarding features. 
Moreover, standard tabular machine learning assumes rows are independently and identically distributed (i.i.d.), further eliminating temporal or sequential correlations present in data such as video or time series. This absence of inherent spatial or sequential structure challenges deep learning architectures traditionally designed to exploit such dependencies. + +Low-quality and Missing Data. Compared to image or text data, where contextual or spatial redundancies help manage missing or corrupted values, tabular data is more vulnerable to incomplete or erroneous entries [146], [147]. Missing values in tabular datasets can introduce significant biases and degrade prediction quality. Additionally, noisy or incorrect values can considerably affect model reliability. Data preprocessing steps, including data cleaning and imputation, become crucial to maintaining accuracy and robustness in tabular machine learning. + +Importance of Feature Engineering. Effective tabular models heavily depend on the quality of their input features [45], [148]. Unlike image or textual data, where DNNs inherently learn feature representations from raw data, tabular methods often require domain-specific knowledge and meticulous manual feature engineering. Identifying and modeling complex, nonlinear interactions among tabular features frequently demands sophisticated transformations and expert insight, significantly impacting the predictive performance of models [149]. + +Class Imbalance. Tabular datasets frequently exhibit imbalanced label distributions, especially in classification tasks, where certain categories are underrepresented [150], [151]. Class imbalance complicates model learning, leading to biased outcomes toward majority classes and poor performance on minority classes. Specialized methods such as oversampling, undersampling, or tailored loss functions (e.g., focal loss [152]) are required to address this imbalance effectively. 
Evaluation criteria like the AUC or F1-score further help assess model quality in imbalanced settings. Recent research highlights differences between deep and classical models in handling imbalance, emphasizing the need for careful consideration [153], [154], [155], [41].
+
+Remark 2. Class imbalance has long been a known issue in the tabular domain, even before the rise of deep learning [156], and methods such as SMOTE [157], [158] can easily be extended to deep learning methods during preprocessing. However, current deep tabular methods primarily assume that the training and testing data come from the same distribution, even in cases involving class imbalance. In addition, some class imbalance methods in the visual domain can be readily extended to tabular data learning [159], [160]. Therefore, we do not delve into class imbalance in this survey.
+
+Scalability to Large Datasets. Tabular datasets can become large-scale and high-dimensional, presenting computational and generalization challenges [161]. With increasing dimensionality, the risk of overfitting increases, especially when the number of features significantly surpasses the number of samples. Consequently, efficient training algorithms, memory management strategies, and sufficient computational resources become essential. Effectively scaling tabular models to handle large datasets while maintaining generalization ability remains a challenging but critical research area [162].
+
+Model Selection and Hyperparameter Tuning. Tabular models are particularly sensitive to hyperparameter settings [163], [164]. Selecting an appropriate model architecture and tuning hyperparameters, such as learning rate, layer depth, or number of trees, can be computationally expensive and time-consuming. 
Despite the advancement of automated machine learning (AutoML) techniques [165], [166], [167], efficiently identifying optimal configurations for deep tabular methods under practical constraints remains challenging and critical for achieving high predictive performance.
+
+Domain-Specific Constraints. Certain application domains, such as healthcare or finance, impose additional regulatory or ethical requirements on model development [168]. For example, healthcare applications must comply with privacy standards like HIPAA [169] and provide explainability to clinicians. Financial models similarly must adhere to fairness regulations and industry standards. These constraints can restrict algorithm selection, necessitate interpretable predictions, and require additional validation, explainability, and auditability procedures [170], [171], [172].
+
+# 2.4 Evaluation of a Tabular Method
+
+We present the evaluation of tabular methods, ranging from traditional to modern, to provide a comprehensive evaluation across different aspects. For a given model on a dataset $\mathcal{D}$, we employ standard metrics that quantify the discrepancy between the predicted label $\hat{y}_i$ and the true label $y_i$.
+
+Evaluation on A Single Task. For classification tasks, Accuracy (or Error Rate) is commonly employed as the primary metric. AUC and F1 scores are further used to address imbalanced label distributions, while Expected Calibration Error (ECE) [173], [174] calculates the weighted average error of the estimated probabilities. All criteria are the higher, the better, except the error rate and ECE. For regression tasks, common metrics include Mean Squared Error (MSE), Mean Absolute Error (MAE), and Root Mean Squared Error (RMSE), with MAE and RMSE sharing the scale of the original labels. Lower values denote superior performance. Additionally, the coefficient of determination $(\mathbb{R}^2)$ is employed, with higher values indicating a better fit.
+
+In tabular machine learning, the diversity of datasets makes it difficult for any single model to consistently excel across all scenarios. Therefore, evaluating models requires not only assessing their performance on individual datasets but also employing aggregated metrics that capture their overall effectiveness across multiple datasets.
+
+Evaluation on A Set of Tasks. Early research predominantly relied on Average Rank (Friedman Rank) [12], [39], often used in conjunction with Critical Difference Comparisons, to evaluate model performance across multiple datasets. Models are ranked per dataset based on a chosen metric (e.g., accuracy, AUC, RMSE), and the average rank is computed across datasets. To ensure statistical robustness, hypothesis tests were employed to assess the significance of ranking differences, providing a more reliable comparative analysis. For multiple comparisons, tests such as the Wilcoxon-Holm, Friedman, and Nemenyi tests are employed [175]. To address the potential degradation of average rank by poor performance on some datasets, the Probability of Achieving the Maximum Accuracy (PAMA) [12] is defined as the fraction of datasets in which a model attains the highest accuracy. An alternative to PAMA accounts for near-optimal performance: $P95$ quantifies the likelihood of a model attaining at least $95\%$ of the maximum accuracy, which is computed as the ratio of datasets where the classifier achieves at least $95\%$ of the maximum accuracy to the total number of datasets.
+
+As research progressed, more diverse evaluation metrics were introduced. The Arithmetic Mean of a chosen metric provides a direct comparison across datasets, but variations in the scales of evaluation metrics across datasets can distort results. To mitigate this issue, performance metrics are often normalized before aggregation, with normalized Accuracy applied to classification tasks and normalized RMSE (nRMSE) used for regression [36], [34]. 
Depending on the evaluation framework, Mean Normalized Error can be used, but its dependence on normalization can hinder independent optimization. To further address these limitations, the Shifted Geometric Mean (SGM) error was introduced, which aggregates errors multiplicatively, reducing sensitivity to extreme values and ensuring more stable cross-datasets/splits comparisons [34]. + +Beyond absolute performance, relative comparisons are also important. The Relative Improvement metric quantifies a model's performance gain over a baseline (e.g., a simple MLP), offering insight into efficiency relative to simpler alternatives [176]. More recently, drawing inspiration from the ELO rating system[177], [178], ELO-based evaluation has + +been introduced [179], modeling model-to-model comparisons as pairwise competitions across datasets. The ELO Score iteratively adjusts rankings based on relative performance, providing a more dynamic, fine-grained assessment. + +# 2.5 Tabular Benchmarks and Datasets + +This section introduces existing benchmarks and datasets, along with associated considerations for constructing the benchmarks and evaluation protocols. + +# 2.5.1 Popular Tabular Benchmarks and Datasets + +We first introduce several benchmarks based on raw features constructed from various aspects. Then, we present datasets with rich semantics, following some tabular toolboxes and evaluation protocols. + +Standard Benchmarks. Methods for tabular data have preferences depending on the dataset, and evaluating them on limited datasets can be easily influenced by randomness or other factors. Therefore, it's important to consider various aspects to ensure a more comprehensive and reliable benchmark evaluation. + +A comprehensive benchmark should cover a diverse set of datasets to test the model's generalization capabilities across different tasks and feature types. 
The benchmark should include datasets from different task types, including binary classification, multi-class classification, and regression tasks. [12] evaluates 179 classifiers across 17 families on 121 datasets, concluding that Random Forest variants were the most likely to perform best overall. [50] explores MLPs with parameterized techniques, such as ensembling and data augmentation, over 40 classification datasets. Similarly, [33] demonstrates the effectiveness of MLPs, ResNets, and transformer-based models on 11 datasets. [36] conducts experiments on 45 datasets, investigating the differences between tree-based and DNN-based methods. + +The benchmark should cover datasets with varying sizes, including datasets with a large number of samples and features as well as smaller datasets. The diversity of dataset sizes helps evaluate the scalability and efficiency of different models. [39] includes 176 classification datasets and evaluates 19 methods, comprising 8 classical and 11 deep methods. In this study, the pre-trained TabPFN model [89] emerges as the top performer on average, even when limited to randomly sampled training sets of 3000 examples. However, limited trials for hyperparameter tuning and strict time constraints in [39] may have led to suboptimal evaluations for some deep tabular methods [180]. + +To ensure robustness and generalization, datasets from multiple domains should be included. Common domains for tabular data include healthcare, biology, finance, education, and physics. Additionally, some datasets are derived from other domains, such as image or speech data, by feature extraction. [181] evaluates attention mechanisms and contrastive learning methods across 28 tabular datasets, comparing their performance with traditional deep learning and machine learning approaches. [44], with a particular focus on DNN-based models, uses a benchmark of over 300 tabular datasets spanning a wide range of task types, sizes, and domains. 
A more diverse collection allows us to assess whether a tabular method can generalize across applications. + +Semantic-Enriched Datasets. In addition, recent research has also focused on evaluating tabular data with rich semantics, such as incorporating meta information related to tasks or integrating attribute names. UniTabE [182] introduces a 7TB dataset containing 13 billion tabular examples for tabular pre-training, covering domains with investing, time series analysis, finance, economics, and with numerical, categorical, text data types. CM2 [79] proposes OpenTabs for crosstab pre-training, which contains an extensive collection of large-scale tables with column name semantics, including approximately 46M tabular samples. TP-BERTa [78] filters the OpenTabs for datasets with at least 10,000 samples and no more than 32 features, resulting in 101 binary classification datasets and 101 regression datasets with about 10 million samples. GTL [81] curates a collection of 384 public tabular datasets from Kaggle, which includes 176 classification and 208 regression tasks spanning a wide range of industrial domains. TabLib collects a set of 627M tables totaling 69TiB, along with 867B tokens of context [183]. TabLib was extracted from numerous file formats, including CSV, HTML, SQLite, PDF, Excel, and others, sourced from GitHub and Common Crawl. T4 (The Tremendous Tablib Trawl) [92] takes account of the inscrutable statistics and call sheets with personally identifiable information in TabLib and filters TabLib into a collection of 4M tables with 2.1B rows. + +Among these benchmarks and datasets, the semantic-rich ones are primarily used for pre-training LLMs on tabular data, while the others are mainly employed for evaluating standard methods. Besides, some toolboxes implement methods over tabular data, including those for classical methods, as well as those for deep tabular methods [184], [185], [186], [187], [188]. 
To establish a comprehensive tabular benchmark, several factors need to be considered, including the range of datasets and data quality. + +Remark 3. Recent studies have proposed alternative perspectives for tabular evaluations, such as focusing on dataset age [42], leveraging expert-level feature engineering [43], and considering dataset version [44]. Studies have also highlighted generalization in open world environments in tabular datasets [43], [109], where the distributions of training, validation, and test sets differ significantly. More discussions are in Section 9. Incorporating diverse, high-quality datasets helps build a reliable benchmark for meaningful model comparisons. + +# 2.5.2 Evaluation Protocols + +Given the strong sensitivity of tabular methods to data and the additional randomness in deep methods, robust evaluation is essential. Furthermore, due to the high computational cost of some methods, it is equally important to ensure evaluation efficiency. + +Model Selection. Model selection on the validation set involves both hyperparameter tuning and early stopping, which are essential for reliable evaluation. Due to the large number of hyperparameters in deep methods, automated methods like Optuna [189] are commonly used to explore hyperparameters through multiple trials [33], [69]. During tuning, models are evaluated on the validation split, while models can also be trained with multiple random seeds, providing more reliable evaluations. In each trial and the + +final training, early stopping [190] is often employed to prevent overfitting, and the epoch with the best validation performance is selected as the final model. + +Performance Evaluation. To assess generalization and prevent overfitting, models are typically evaluated using separate train/val/test splits, with a typical split ratio of $64\% / 16\% / 20\%$ . However, such fixed splits may yield inconsistent results. 
With the rise of deep learning, researchers have proposed more robust evaluation protocols to better reflect model capabilities [191]. Two main approaches are commonly used: (1) fixing the data split and running multiple trials with different random seeds [54], [59], [105], [69], [62], [87], [33], [58], [192], [65], [71]; and (2) using cross-validation, where new train/val/test splits are generated in each fold [63], [89], [193], [68], [34]. A hybrid strategy combining both random seeds and cross-validation is also adopted [194]. + +Recent studies show that holdout-based hyperparameter tuning can be unstable and prone to overfitting to the validation set [195], [180]. [180] found it ineffective on most TabZilla [39] datasets and instead used 5-fold cross-validation for more robust hyperparameter selection. As a result, they found the key meta-feature findings reported in [39] no longer held. This observation was also discussed in [44], which further identified meta-features that have a greater impact on model performance. For small datasets, alternative strategies have been proposed [196], [197], [198]. However, this approach significantly reduces the efficiency of hyperparameter search. [199] showed that simply reshuffling data splits can often improve generalization, making holdout selection competitive with cross-validation while remaining more computationally efficient. + +# 3 FROM CLASSICAL TO DEEP METHOD + +We present possible advantages of deep learning for tabular data, as well as the potential challenges of deep learning when compared with tree-based methods. + +# 3.1 Advantages of deep representation learning + +Deep tabular models offer several advantages beyond performance when compared with classical methods. + +Ability to Model Complex Feature Interactions. DNNs are particularly adept at capturing high-order, non-linear interactions between features, which may be challenging for traditional models like linear regression or decision trees [51], [54]. 
By learning a hierarchical representation of features, DNNs allow low-level feature interactions to be captured in the initial layers, while higher-order interactions are identified in deeper layers. This ability to automatically learn complex relationships makes DNNs highly effective in capturing intricate dependencies within tabular data. + +End-to-End Learning. Unlike traditional machine learning methods, which often involve separate steps for feature engineering, preprocessing, and model tuning, DNNs can process raw features and automatically extract useful representations without complex manual transformations. This end-to-end learning approach reduces human bias and simplifies the workflow, making the process more efficient. DNNs are trained through gradient optimization, enabling + +a unified, streamlined solution for complex tasks [33], [107]. Additionally, deep models support multi-task learning, allowing related tasks to benefit from shared representations, enhancing both performance and efficiency [200], [70], [49]. Integration with Other Modalities. Deep tabular methods excel in multi-modal pipelines, where tabular data is integrated with other modalities, such as images, audio, or text. In AI4science applications, for instance, tabular data might be combined with image data [106], [107] (e.g., in medical imaging applications) or time-series data [201], [202] (e.g., in forecasting tasks). DNNs are well-suited to model interactions between heterogeneous data types, improving the overall performance. By jointly learning from multiple data sources, DNNs enhance their ability to make more accurate and comprehensive predictions across domains. + +Flexibility with Dynamic Environments. DNN-based methods benefit from the flexibility of gradient-based optimization, which allows efficient and iterative training. 
This flexibility makes DNNs adaptable to changing objectives without significant modifications, unlike tree-based models that often require specialized methods for different tasks [9]. Moreover, DNNs excel in dynamic environments, such as real-time predictions, financial analysis, and decision-making systems, where feature relationships may shift. This adaptability makes them suitable for online learning or incremental training, where new data is continuously integrated without retraining from scratch [203], [204]. + +Long-Term Knowledge Transfer and Learning. DNNs are capable of long-term learning and knowledge transfer, which allows them to retain valuable knowledge gained from training on diverse tasks [205]. Once trained on a broad set of tasks, DNNs can transfer this knowledge to related domains, reducing the need for complete retraining [206]. This is especially advantageous in fields like AI4science, where a model trained on one type of scientific data can be adapted to other related domains, saving both time and computational resources. This ability to transfer knowledge across tasks is a key advantage of deep learning, enabling more efficient use of data and model capabilities over time. + +# 3.2 Debates between Tree-Based Methods and DNNs + +Although deep tabular methods have shown great potential in learning semantic representations and constructing nonlinear predictors, their initial performance often struggles to surpass that of classical tree-based ensemble methods, such as Gradient Boosted Decision Trees (GBDT). Many studies still treat GBDT approaches as strong baselines [36], [39], and in some cases, the advantages of deep tabular methods diminish as the number of evaluation datasets increases. + +Several reasons contribute to why tree-based methods retain their advantages over DNNs in many tabular tasks: + +Better Handling of High-Frequency Data. 
Tree-based methods, particularly GBDT models, are highly efficient at handling high-frequency data or dense datasets with many small variations [38]. These models build decision trees by recursively splitting the data at the most informative feature points, capturing both local and global patterns efficiently. DNNs, on the other hand, may not capture fine-grained patterns as effectively without extensive regularization or tuning [207], [208]. To address this limitation, [38] introduced frequency reduction as an inductive bias through + +the addition of scaling layers, while [45] demonstrated that periodic activation functions can significantly enhance neural networks' ability to learn high-frequency functions. + +Natural Handling of Mixed Data Types. Tabular data often includes a combination of numerical, categorical, and ordinal features [9], [44], [209]. Tree-based models are particularly strong when working with mixed data types, as they can handle categorical features directly without requiring one-hot encoding or embeddings. This ability to work with raw categorical data simplifies the preprocessing pipeline significantly. DNNs, however, generally require encoding techniques (e.g., one-hot encoding or learned embeddings) for categorical features, adding complexity and potentially leading to suboptimal performance [63]. + +Lower Computational Requirements for Training and Inference. For certain tasks, tree-based models tend to be more computationally efficient than DNNs [33]. GBDTs and other decision tree-based models can train relatively quickly and are less computationally intensive than deep neural networks [210], [39]. This is especially true when the dataset is not massive or when the model needs to be trained and deployed rapidly. DNNs, on the other hand, often require significant computational resources (e.g., GPUs, longer training times) to achieve comparable performance, making them less ideal in resource-constrained environments [211], [88]. 
+ +Robustness to Noisy and Missing Data. Tree-based models are generally more robust to noisy data and missing values. When training a decision tree, missing values can be handled through optimal splitting that accommodates absent data, and trees can effectively deal with noisy or inconsistent data points [36]. DNNs, in contrast, are more sensitive to noise and often require careful preprocessing or specific techniques (e.g., data imputation or noise filtering) to avoid performance degradation with noisy or missing data [65], [89]. + +Interpretability and Transparency. Tree-based models offer a significant advantage in terms of interpretability [60], [61], [105]. The decision-making process of models like GBDT can be easily visualized in the form of decision paths, and feature importance can be directly extracted [130], [132], [131]. This transparency makes tree-based models appealing in domains where model explainability is crucial, such as in finance, healthcare, and regulated industries. Although interpretability techniques like LIME [212] and SHAP [213] exist for DNNs, tree-based models still tend to be more intuitive and easier to explain, especially in complex decision-making environments. Recent works [214], [60], [59], [193] have sought to bridge this gap by enhancing neural network interpretability through emulation of tree-based model behaviors. + +Handling Outliers and Skewed Data. Tree-based methods are often better at handling outliers and skewed distributions in the data. When a feature exhibits extreme values or skewed distributions, decision trees are inherently less sensitive to such anomalies because they create splits based on feature ranges that naturally isolate outliers. This characteristic can make them more robust than DNNs, which may require specialized loss functions or techniques (e.g., robust scaling or outlier removal) to handle such data points [43], [109]. 
+ +# 4 TAXONOMY OF SPECIALIZED METHODS + +Similar to the evolution of deep learning, which progresses from specialized learning to transfer learning and ultimately to foundation models [244], we categorize deep tabular methods into three groups, as shown in Figure 2: specialized methods, transferable methods, and general methods. This classification reflects both the evolutionary development of deep learning techniques and the increasing generalization capabilities of these models. + +Specialized methods, being the earliest developed and most widely used category, will be our starting point for discussion. Tabular data consists of features (columns), samples (rows), and objectives (labels), which together define the structure and the task objectives. We emphasize detailed strategies for obtaining high-quality representations at both feature- and sample-level for the target task. Specifically, given the input data, according to the general learning objective in Equation 1, we consider how to transform the tabular input $x_{i}$ (feature aspect), how to construct relationships between samples (sample aspect), how to design the objective $\ell(\cdot)$ and regularize $\Omega(\cdot)$ (objective aspect). In particular, + +- Feature Aspect. We focus on how to transform the raw tabular input (in various forms) into intermediate representations. We consider two types of features: numerical and categorical. By explicitly modeling the relationships between the two features (e.g., feature importance and interactions), we are able to enhance the model's understanding of the input space. + +- Sample Aspect. In addition to features, we explore how to retrieve and utilize neighboring samples to capture intersample dependencies, thereby improving predictions. In order to improve the model's ability to make predictions, we explore the relationships between a target sample and its "extracted neighbors." + +- Objective Aspect. 
We examine how to modify the loss function and overall objective to introduce inductive biases. By directly guiding the learning process with the target variables, we incorporate prior knowledge or task-specific preferences into the model, thereby improving its generalizability and interpretability. + +In specialized methods, we focus solely on learning from pure data, excluding feature semantics considered in transferable methods (in Section 6), as they leverage the capabilities of language models. Since specialized methods encompass a wide range of approaches, and feature-aspect methods are the most extensive part of them, we will first introduce sample-aspect methods and objective-aspect methods in the following subsections. In Section 5, we will provide a detailed introduction to feature-aspect methods. + +# 4.1 Sample-aspect Specialized Methods + +Sample interaction methods take a retrieval-based approach, focusing on relationships between individual samples rather than features. In a tabular dataset, each sample $x_{i}$ represents a row with $d$ features, and the goal is to leverage relationships between a target sample and its "extracted neighbors" to improve predictions. + +The general form for the sample interaction methods can be expressed as: + +$$ +\hat {y} _ {i} = f \left(\mathcal {R} \left(\boldsymbol {x} _ {i}, \mathcal {D}; \Phi\right)\right), \tag {2} +$$ + +Table 1: The taxonomy of representation learning for tabular data. The shade color in the last column denotes the subcategory, which is consistent with Figure 3. + +
Algorithm CategoryReference
Specialized Methods§ 5Feature-aspect MethodsFeature Encoding[33], [45], [64]
Feature Selection[59], [60], [105], [61], [193]
Feature Projection[52], [33], [34], [58]
Feature Interaction[54], [62], [63], [55], [65], [49], [215]
§ 4.1Sample-aspect MethodsSample Interaction[70], [216], [217], [192], [67]
Neighbor Retrieval[218], [68], [69], [35]
§ 4.2Objective-aspect MethodsTraining Objective[67]
Training Regularization[219], [50], [66]
§ 6Transferable MethodsHomogeneous[63], [48], [70], [220], [46], [221], [222], [223], [47], [224], [225], [226], [227]
Heterogeneous[228], [229], [222], [72], [73], [64], [230], [231]
Language Model[77], [232], [182], [79], [78], [233], [234], [82], [83], [235], [236], [80], [237]
Vision Model[238], [239], [240], [74], [75], [241], [242], [76]
§ 7General MethodsRaw-Feature-based[86], [87], [88]
TabPFN Variants[89], [91]
Semantics-based[92], [93], [94], [243]
+ +![](images/65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg) +Figure 3: The roadmap of deep representation learning tabular methods. We organize representative methods chronologically to show the concentration at different stages. Different colors of these methods denote the sub-categories. + +where $\mathcal{D}$ is the set of all samples (training data) available for retrieval or learning. $\mathcal{R}(\cdot)$ is the sample interaction module, which retrieves or aggregates information from relevant samples in $S$ for the target sample $\boldsymbol{x}_i$ . $\Phi$ represents the learnable parameters of $\mathcal{R}$ . $f(\cdot)$ is the prediction head that maps the aggregated information to the final output $\hat{y}_i$ . + +Sample aspect approaches can be broadly categorized into two main strategies. The first approach introduces the modeling of sample relationships $\mathcal{R}$ during representation training, allowing the model to learn better representations by capturing inter-sample dependencies. The second ap + +proach is retrieval-based models, which directly predict outcomes by learning how to retrieve and utilize neighbors' relationships $\mathcal{R}$ when testing. + +Sample Interaction. These methods assist in representation learning by allowing the model to capture relationships between samples, which in turn helps generate a more robust representation during training. During testing, the model becomes more sensitive to each sample without interaction. + +SAINT [70] introduces inter-sample attention beyond inter-attribute attention, which improves row classification by relating each row to others in the table. NPT [216] + +extends this via non-parametric Transformers, whereas Hopular [217] employs Hopfield networks, sharing conceptual alignment with SAINT [70]. Unlike nearest-neighbor classification, the distance metric is learned end-to-end. Prompt [192] posits that the feature importance in tabular data is sample-dependent. 
During feature extraction, it treats the information between samples as prompts. PTaRL [67] identifies two issues in the representation of tabular data samples: entanglement and localization. It addresses these by modeling global sample relationships through prototype generation and representation projection, helping the model produce clear and consistent decisions. + +Neighbor Retrieval. These methods construct high-quality contexts to aid prediction by retrieving valuable neighbors and designing efficient ways to utilize them based on the relationships between samples. The training data is used to assist during testing. + +DNNR [68] argues that a key advantage of neighbor-based methods is the model's transparency, meaning that the model's decisions can be explained by inspecting its components. It enhances predictive performance by incorporating local gradient estimation and Taylor series approximation into the KNN framework. TabR [69] proposes that, compared to purely parametric (e.g., retrieval-free) models, retrieval-based models can achieve superior performance while also exhibiting several practically important properties, such as the ability for incremental learning and enhanced robustness. It encodes all candidate samples and then employs an attention-like mechanism to retrieve the samples that aid in making predictions, as explored in [218]. ModernNCA [35] revitalizes the classic tabular prediction method, Neighbourhood Component Analysis (NCA) [245], by designing and incorporating deep learning architectures and strategies. The resulting method efficiently leverages neighboring samples for prediction. + +Remark 4. The neighborhood-based approach closely resembles the current in-context learning [246] mechanism. In particular, the in-context learning used in general models like TabPFN [89], [91] can also be considered a form of the neighborhood method. 
This concept of neighborhood not only helps in standard tasks, but also enhances transferable and general methods. For example, LoCalPFN [90] highlights that employing local linear regression can lead to more expressive decision boundaries, while utilizing local context allows performance to scale with the size of the training dataset. + +# 4.2 Objective-aspect Specialized Methods + +The general objective learning $f$ follows the structural risk minimization as in Equation 1, where $\ell$ is the loss function to set the training objective between the prediction and the ground truth label. $\Omega(\cdot)$ is the regularization on the model, which directs the objective or restricts the complexity of $f$ . + +In traditional machine learning, models often rely on explicit regularization techniques on $\Omega$ to ensure good generalization. Methods such as decision trees, support vector machines, and linear models typically incorporate regularization terms directly into the loss function to control model complexity and prevent overfitting. For example, in linear regression, regularization methods like L1 (Lasso) [247], L2 + +![](images/6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg) + +![](images/8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg) +Figure 4: Illustration of feature-aspect methods, including feature encoding, feature selection, feature projection and feature interaction. + +![](images/ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg) + +![](images/f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg) + +(Ridge) [248], or Elastic-Nets [249] penalize large coefficients, effectively controlling the complexity of the model and helping to maintain a balance between bias and variance. 
+ +Objective-aspect methods in deep learning are an extension of these traditional regularization techniques, where inductive bias is introduced by adjusting the loss function $\ell$ or adding regularizers $\Omega$ . In the training process, the goal is to leverage regularization on the model to improve predictions. + +Remark 5. Pre-train methods such as homogeneous transferable tabular methods in Section 6 also change the loss function $\ell$ or the regularization $\Omega$ to help pre-training. We will discuss these methods later. + +Objective-aspect approaches can be broadly categorized into two main strategies. The first approach involves training objectives, which enhance the model with a specialized ability. The second approach introduces a regularizer, allowing the model to learn strong generalized representations. + +Training Objective. For training objectives, PTaRL [67] constructs prototype-based projection space and learns the disentangled representation around global prototypes. PTaRL uses a diversification constraint for representation calibration and introduces a matrix orthogonalization constraint to ensure the independence of global prototypes. + +Training Regularization. For training regularization, RLNs [219] overcome the challenge of an intractable number of hyperparameters during training by introducing an efficient tuning scheme, which minimizes a new "Counterfactual Loss." In RLNs, the regularization coefficients are optimized together with learning the network weight parameters. RLNs produce extremely sparse networks, thus providing more interpretable models and revealing the importance that the network assigns to different inputs. [50] introduces "cocktails," dataset-specific combinations of 13 regularization techniques, showing that even simple neural networks can outperform tree-based architectures when optimized with these methods. TANGOS [66] introduces a regularization-based improvement. 
It regularizes neuron attributions to encourage neurons to specialize and become orthogonal to one another. + +# 5 FEATURE-ASPECT SPECIALIZED METHODS + +Tabular data is characterized by a diverse set of features, including both categorical and numerical variables. The complexity of tabular data arises from the variety of feature types, their interrelationships, and the high dimensionality often present. Traditional methods often rely on manual feature engineering, using techniques such as encoding categorical variables and selecting relevant features to improve model performance and reduce overfitting. + +As deep learning has evolved, these traditional techniques have been integrated and expanded upon. Deep tabular models are capable of automatically learning complex feature representations, reducing the need for explicit feature engineering. Feature-aspect methods, such as feature encoding, selection, projection, and interaction, are essential for transforming raw tabular inputs into more informative intermediate forms. These methods help improve a model's ability to capture intricate relationships between features, thereby enhancing its generalization capabilities. + +# 5.1 Feature Encoding + +Various encoding strategies have been explored for both categorical and numerical features in tabular data. Additionally, with the advancement of the attention mechanism, feature tokenization, similar to word embeddings in natural language processing, transforms all features into embeddings. + +Categorical Encoding. Categorical variables represent types of data which may be divided into groups. Examples of categorical variables are race, sex, age group, and educational level [250]. The categorical features are usually transformed in an index (integer). The two most popular techniques are an Ordinal Encoding and a One-Hot Encoding. + +Ordinal Encoding assigns each unique category a distinct integer value. 
This approach is useful when the categorical variable has an inherent order, such as "low," "medium," and "high." The main advantage of Ordinal Encoding is its simplicity and efficiency, as it transforms the categorical variable into a single numeric column. However, it assumes that there is an ordinal relationship between the categories, which may not always be the case. For instance, if the categorical variable represents "color" with categories such as "red," "blue," and "green," applying Ordinal Encoding would introduce an artificial order that does not reflect any meaningful ranking. + +On the other hand, One-Hot Encoding creates a new binary column for each unique category in the original categorical variable. For example, for a variable "color" with three categories (red, blue, and green), One-Hot Encoding would generate three binary columns: "is_red," "is_blue," and "is_green," encoding red as $(1,0,0)$ , blue as $(0,1,0)$ and green as $(0,0,1)$ . Each column indicates the presence or absence of that particular category. One-Hot Encoding is useful for nominal categorical variables, where no order exists between the categories. While One-Hot Encoding avoids the assumption of ordinal relationships, it can lead to a high-dimensional feature space if the categorical variable has many unique values, which may result in increased computational costs and potential issues with overfitting. + +In some cases, more advanced encoding techniques are used to address the limitations of these basic approaches. + +For example, Target Encoding assigns each category a value based on the mean of the target variable for that category. This method can be useful when there is a strong relationship between the categorical features and the target. In Leave-one-out embedding, every category is replaced with the mean of the target variable of that category, which excludes the current row to avoid overfitting. + +Numerical Encoding. 
For encoding, MLP-PLR [45] introduces two numerical encoding methods: Piecewise Linear Encoding (PLE) and Periodic Activation Functions. These encoding methods can be integrated with other differentiable layers (e.g., Linear, ReLU) to enhance performance. PLE produces alternative initial representations for the original scalar values and is based on feature binning. Periodic Activation Functions take into account the fact the embedding framework where all features are computed independently of each other forbids mixing features during the embedding process and train the pre-activation coefficients instead of keeping them fixed. [38] utilizes tools from spectral analysis, showing that functions described by tabular datasets often have high irregularity, and can be smoothed by transformations such as scaling and ranking to improve performance. They propose "frequency reduction" as an inductive bias during training. + +Feature Tokenization. Feature tokenizer performs a similar role to the feature extractor in traditional models. It transforms the input features to embeddings [62], [33]. Since the feature representations of features are very sparse and high-dimensional, a common way is to represent them into low-dimensional spaces (e.g., word embeddings). + +The general form for feature tokenization can be expressed as: + +$$ +\boldsymbol {T} _ {i, j} = \boldsymbol {b} _ {j} + \mathcal {T} \left(x _ {i, j}; \Psi\right) \in \mathbb {R} ^ {t}, \tag {3} +$$ + +where $\mathcal{T}(\cdot)$ is the feature tokenizer module, which transforms the input feature vector $\pmb{x}_i\in \mathbb{R}^d$ to a token embedding $T_{i,j}\in \mathbb{R}^t$ . $t$ is the dimension of token embedding. $\pmb{b}_{j}$ is the $j$ -th feature bias. $\mathcal{T}$ can be implemented with different forms. 
$\Psi$ represents the learnable parameters of $\mathcal{T}$.
This helps prevent overfitting, improves generalization, and reduces computational complexity. + +Traditional tree-based models facilitate automatic feature selection by evaluating the impact of each feature on the target during the construction process. Decision trees utilize metrics such as information gain or the Gini index for feature selection, while ensemble methods like random forests determine feature importance by assessing each feature's contribution [251], [252], [253]. Recently, modern deep learning methods for tabular data often mimic trees' structures for feature selection. + +GrowNet [59] and NODE [60] primarily mimic ensemble techniques. Inspired by GBDT, GrowNet designs a framework for building DNNs with multiple weak learners, where each learner's input consists of the original features plus the penultimate layer output from the previous learner. NODE uses a differentiable Oblivious Decision Tree as the base model, applying Bagging within each layer and Stacking across layers in a multi-layered structure. To make GAM [254] scalable and effective, NODE-GAM [61] modifies NODE to be a GAM, allowing GAM to learn quick, nonlinear jumps that better match patterns in real data. + +TabNet [105] and GRANDE [193] focus more on how tree models handle features. TabNet not only retains the representation learning capabilities of DNNs through self-supervised learning, but also incorporates the interpretability of tree models and the benefits of sparse feature selection, with a model structure designed for both feature selection and computation. GRANDE argues that the hard splits used by tree models are a key advantage over deep models, and thus proposes a method for learning hard, axis-aligned tree ensembles using gradient descent. GRANDE combines the beneficial inductive bias of axis-aligned splits with the flexibility provided by gradient descent optimization. 
+ +# 5.3 Feature Projection + +Feature projection methods aim to project the raw data into a middle form, enhancing the representation ability for later architectures. Feature projection methods can be broadly categorized into two main approaches: MLP variants and special designed architectures. These approaches aim to enhance the model's ability to represent complex features for underlying feature structures. + +MLP Variants. For model architecture, RTDL [33] investigates both ResNet-like and Transformer-based architectures tailored for tabular data, proposing simple yet effective adaptations of these widely-used deep models. In particular, the MLP architecture is constructed by stacking multiple blocks consisting of Linear layers, ReLU activations, and Dropout, which transform the raw tabular features into a fixed-dimensional hidden representation. A final linear layer is then used as the classification head. The paper highlights + +an important insight: with proper hyperparameter tuning, even simple architectures like MLP and ResNet can achieve competitive performance on tabular benchmarks. + +Another contemporaneous work [50] enhances the MLP architecture by equipping it with a comprehensive suite of modern regularization techniques. Instead of introducing architectural innovations, this study focuses on systematically exploring combinations of 13 different regularization methods to identify an effective "regularization cocktail" for plain MLPs. The results demonstrate two key findings: (i) a well-regularized vanilla MLP can significantly outperform many recent, specialized neural architectures designed for tabular data; and (ii) such MLPs can even surpass strong traditional machine learning models like XGBoost across a range of benchmarks. For a more comprehensive strategy, RealMLP [34] explores multiple aspects including preprocessing, hyperparameters, architecture, regularization, and initialization. + +Special Designed Architectures. 
For units, motivated by the observation that normalization techniques are prone to disturbances during training, SNN [52] proposes the Scaled Exponential Linear Unit (SELU) to improve deep models for tabular data. NAMs [255] uses exp-centered (ExU) hidden units to improve the learnability for fitting jumpy functions. BiSHop [58] uses a dual-component approach, sequentially processing data both column-wise and row-wise through two interconnected directional learning modules. They use layers of generalized sparse modern Hopfield layers, a sparse extension of the modern Hopfield model with learnable sparsity. + +# 5.4 Feature Interaction + +Feature interaction methods aim to model relationships among features to enhance the representation power of deep learning models on tabular data. In tabular datasets, each sample $\boldsymbol{x}_i \in \mathbb{R}^d$ is described by $d$ features, and the goal is to transform these raw features into enriched representations that improve predictive performance. + +The general form for feature interaction methods can be expressed as: + +$$ +\hat {y} _ {i} = f \left(\mathcal {H} \left(\boldsymbol {x} _ {i}; \Theta\right)\right), \tag {4} +$$ + +where $\pmb{x}_i\in \mathbb{R}^d$ is the input feature vector for a single instance, $\mathcal{H}(\cdot)$ is the feature interaction module, which transforms the input $\pmb{x}$ by capturing feature dependencies or generating higher-order feature interactions. $\Theta$ represents the learnable parameters of $\mathcal{H}$ . $f(\cdot)$ is the prediction head that maps the transformed representation to the final output $\hat{y}$ . + +Feature interaction methods can be broadly categorized into two main approaches: the design of automatic feature interaction modules and the mining of implicit feature relationships. These approaches aim to enhance the model's ability to learn complex feature interactions and underlying feature structures within tabular data. + +Automatic Feature Interaction Modules. 
These methods do not assume specific feature types within the tabular dataset. Instead, they focus on improving the feature interaction process, enabling the model to learn complex, high-order feature relationships autonomously. + +DCNv2 [54] improves the learning of the model's feature interaction by improving the "Cross Network" structure. It + +employs low-rank methods to approximate feature crosses in subspaces and then integrates these subspaces using a gating mechanism. AutoInt [62] maps the original sparse high-dimensional feature vectors into a low-dimensional space and models high-order feature interactions by stacking interaction layers with a multi-head attention mechanism. Unlike AutoInt, the TabTransformer[63] only maps categorical features into contextual embeddings and feeds them into a Transformer model, while numerical continuous features are directly concatenated with the interacted contextual embeddings. When tabular data contains only numerical features, TabTransformer behaves in an MLP-like manner. Conversely, when the data contains only categorical features, TabTransformer operates similarly to AutoInt. + +Implicit Feature Relationships. Methods in this category typically assume that features in tabular data can be abstracted into implicit types and that it is necessary to design a suitable feature learning process to adapt to the characteristics of different types of features. + +DANets [55] propose the existence of underlying feature groups in tabular data, where features within each group are correlated. They learn to group input features and perform further feature abstraction. SwitchTab [49] introduces the idea of extracting sample-specific "Salient Features" and sample-shared "Mutual Information" in tabular features. It leverages self-supervised learning to assist in learning feature representations. ExcelFormer [65] argues that while DNN assigns weights to each feature, it does not actively exclude irrelevant features. 
To address this, it introduces Semi-Permeable Attention for feature interaction, which allows features with lower information content to access information from more informative features while preventing highly informative features from being influenced by less relevant ones. AMFormer [215] proposes the hypothesis that arithmetic feature interactions are crucial for deep tabular models. Based on the Transformer architecture, it introduces components designed to extract both additive and multiplicative interaction information. + +# 6 FROM SPECIALIZED TO TRANSFERABLE MODEL + +Instead of training a tabular model from scratch, learning based on a Pre-Trained Model (PTM) may increase the learning efficacy and reduce the resource and data requirement. For example, in a house prices prediction task, training a regressor in a certain area may benefit from a well-trained predictor from its neighborhood. + +Learning by reusing the PTM usually contains two stages. The first is the pre-training of a tabular model, from one or more upstream tasks. Given the PTM and a downstream task, an adaptation strategy is needed to transform the PTM to the target task or facilitate the learning of the target model. Formally, a well-trained model $g_{\Theta}$ is often available and can be leveraged to facilitate the training of $f_{\theta}$ over $\mathcal{D}$ . Here, $g_{\Theta}$ is pre-trained on a dataset $\mathcal{D}' = \{(x_j', y_j')\}_{j=1}^{N'}$ with instances $x_j' \in \mathbb{R}^{d'}$ and labels $y_j' \in [C']$ . To reuse expert knowledge in $g_{\Theta}$ , an adaptation strategy is applied: $f_{\theta} = \text{Adapt}(f_{\theta_0} \mid \mathcal{D}, g_{\Theta})$ , where $\theta_0$ is the initialization of the model. The notation could also be extended to cases with more than one PTM. The main challenge to reuse one or more PTMs is to bridge the gap between the PTM and the + +target tabular model [256]. We categorize PTMs into three kinds based on the source of PTM $g_{\Theta}$ . 
+ +Homogeneous Transferable Tabular Model. First, the PTM may come from the same form of task (with $d' = d$ and $C' = C$ , but with different distributions $\operatorname{Pr}(\mathcal{D}') \neq \operatorname{Pr}(\mathcal{D})$ or model families $g \neq f$ ). For example, those pre-trained from other domains [71], or those unlabeled instances [48], [70]. + +Heterogeneous Transferable Tabular Model. In addition, we consider a PTM pre-trained from a slightly different task with $\mathcal{D}$ . In addition to the previous difference, the PTM $g_{\Theta}$ may differ from $f_{\theta}$ in feature dimension $(d' \neq d)$ or target class set $(C' \neq C)$ , so the adaptation method $\mathbf{Adapt}(\cdot)$ must handle such heterogeneity [64], [230]. + +Cross-Modal Transferable Tabular Model. Moreover, the pre-trained model could also be constructed from another modality, such as vision and language domains. The cross-modality PTM is hard to be applied to the tabular prediction task in most cases, so auxiliary information from the tabular task like the semantic meaning of attributes (i.e., the attribute names) are usually assumed to be available in this case, where PTM like large language models may provide the latent semantic meanings as external knowledge [77], [73]. + +The main limitation of the transferable tabular model is the assumption that the data distribution of the well-trained model should be similar to the distribution of the target model. For example in the previous house price prediction task, if the PTM is pre-trained in an area distance from the target area and targets diverse problems, it is hard to utilize the PTM in the target task [222]. Since different tabular tasks may vary in their distribution, feature, or classes, the general assumption is their exist a common "dimension" between the PTM and the target task. Only the distribution changes under the shared dimension and classes, or there exists an overlap between the feature or class spaces [230]. 
For example, in real-world applications such as healthcare, there are numerous medical diagnostic tables. These tables usually have some features in common such as blood type and blood pressure. For rare diseases with limited data, knowledge transfer from other diagnostic tables with overlapping features becomes beneficial [228]. When the feature/label semantics are available, two different tasks may be linked through the semantic space, and textual PTMs can be used to map the tabular instance to this space or facilitate the prediction in this space [80]. + +Pros and Cons of transferable Models. Learning with a well-trained tabular model has several advantages based on the knowledge encoded in the PTM. First, the training efficiency of the target model is improved and the model may converge fast, as the PTM may provide better initialization weights or optimization paths. Then, the target model will reduce the requirement on the data size, i.e., learning with a few-shot dataset. Training based on a PTM also reduces the number of learnable parameters, leading to parameter-efficient tuning and reducing computational resources. + +# 6.1 Homogeneous Transferable Tabular Model + +Adapting a tabular model from another domain with different distributions is investigated in the field of unsupervised domain adaptation before the era of deep learning. One representative method is the biased regularization, which + +![](images/66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg) +Figure 5: Illustration of homogeneous transferable tabular methods. The pre-trained model could be constructed from supervised learning or self-supervised learning, which includes masked language model, contrastive pre-training, and hybrid methods. 
+ +minimizes the difference between the weights of the PTM and the target model, i.e., + +$$ +\min _ {\boldsymbol {W}} \ell (\boldsymbol {W}) + \| \boldsymbol {W} - \boldsymbol {W} ^ {\prime} \| _ {F} ^ {2} = \min _ {\Delta \boldsymbol {W}} \ell \left(\Delta \boldsymbol {W} + \boldsymbol {W} ^ {\prime}\right) + \| \Delta \boldsymbol {W} \| _ {F} ^ {2}. \tag {5} +$$ + +$\ell(W)$ is the loss function on the current weights $W'$ , and the regularize constraint the distance between the target model $W$ and the PTM weights $W'$ . We can reformulate the learning objective as learning the weights residual $\Delta W$ . Biased regularization can be extended to the case where $f$ and $g$ are deep neural networks such as MLP, but it fails when the target model has a different architecture with the PTM. In this case, instead of matching two models through their weights, matching their predictions also helps. For example, twice learning [253] and knowledge distillation [257]. + +Benefiting from the strong capacity of deep neural networks, some recent studies focus on pre-training a tabular model from unsupervised instances, and then adapting the model via fine-tuning the PTM on the target (even few-shot) labeled examples. This strategy could be applied in standard supervised learning or semi-supervised learning. + +Supervised Pre-training Objectives. A straightforward way to incorporate the target variable into the pre-training is by using the input corruption as an augmentation for the standard supervised learning objective. [71] identifies practices to pre-train tabular deep learning models that can be universally applied to different datasets and architectures. They show that using the object target labels during the pre-training stage benefits the downstream performance and advocates several target-aware pre-training objectives. + +Self-Supervised Pre-training Objectives. 
The self-supervised pre-training objectives can be mainly categorized into three categories, including the masked language model, contrastive pre-training, and hybrid methods. + +Masked Language Model (MLM). MLM is the unsupervised pre-training objective, where a random subset of features is masked for each sample, and the masked values are predicted in a multi-target classification manner [63]. VIME [48] estimates mask vectors from corrupted tabular data and reconstructs feature vectors for self-supervised learning. They use the trained encoder to generate multiple augmented samples for each data point by masking each point using several different masks and then imputing the corrupted values for each masked data point. SubTab [46] finds that reconstructing the data from the subset of its features rather + +than its corrupted version in an autoencoder setting can better capture its underlying latent representation. SEFS [221] reconstructs the original input based on a randomly selected subset of input features, and simultaneously estimates the gate vector that defines which features are selected or not. MET [223] uses a concatenation of representations for all features instead of averaging and uses adversarial reconstruction loss in addition to the standard loss. + +Contrastive Pre-training. Contrastive pre-training uses data augmentations to generate positive pairs or two different augmented views of a given example, and the loss function encourages a feature extractor to map positive pairs to similar features. The key factor in contrastive learning is to generate positive and negative versions of a given instance $x_{i}$ . [70] utilizes CutMix [258] in the input space and Mixup [259] in the embedding space to obtain positive pairs, where other instances $x_{j \neq i}$ are treated as negative ones. 
SCARF [47] generates a view for a given input by selecting a random subset of its features and replacing them with random draws from their respective empirical marginal distributions. STab [224] relies on two (or multiple) weight-sharing neural networks with different regularizations applied to a single input. By exploiting the stop-gradient operation technique, STab can model invariance with respect to more complicated regularizations while it will not collapse to an undesired trivial solution. DoRA [226] incorporates domain knowledge, training by intra-sample pretext task and inter-sample contrastive learning to learn contextualized representations. DACL+ [220], to overcome the reliance on a particular domain, uses Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels. + +Hybrid Methods. [222] explores several pre-training strategies including both supervised and unsupervised ones. It considers MLM as the unsupervised pre-training objective, and sets multi-label classification as the supervised pre-training objective. By fine-tuning the PTM with several choices, including those with frozen feature extractor or not, the paper observes that supervised pre-training leads to more transferable features in the tabular domain. LFR [227] conducts pretraining by learning to simultaneously reconstruct multiple randomly generated projection functions. It considers diverse data types to show the wide-ranging applicability of learning from randomness, including tabular, vision, and language. ReConTab [225] utilizes both self-supervised learning and semi-supervised learning. It uses regularization techniques for raw feature selection and leverages contrastive learning with labels to distill the most pertinent information for downstream tasks. 
[71] focuses on the setup with fully labeled tabular datasets to understand if pretraining helps tabular deep learning in a fully supervised setting and compares pretraining methods to the strong supervised baselines. They show that using the object target labels during the pertaining stage is beneficial for the downstream performance and advocate several target-aware pretraining objectives. [256] provides a systematic review and summarizes the recent progress and challenges of self-supervised learning for non-sequential tabular data. + +![](images/0143006dcbf94506f23c4872c5ddb3dfd3a832c6465dc6e88fc3fcdfe7b31008.jpg) +Figure 6: Illustration of heterogeneous transferable tabular methods. During pre-training on one or multiple datasets, most of the parameters in the PTM are trained. For downstream tasks, only a small subset of parameters is fine-tuned while the rest remain fixed. + +# 6.2 Heterogeneous Transferable Tabular Model + +The main intuition lies in the mapping $f$ and $g$ work in a similar fashion, i.e., predicting the labels with similar mechanisms. Therefore, the main idea to transfer knowledge is to match the target model with the well-trained one, over the weight space or the prediction space. + +Early methods mainly focus on the feature-level heterogeneity between $f$ and $g$ . One main assumption is that there exists a shared set of features between the pre-trained task $\mathcal{D}'$ and the target task $\mathcal{D}$ , then we may directly copy the weights corresponding to the shared features from the PTM. Some methods extend bias regularization to deal with heterogeneous feature spaces by padding the weights with zero. OPID [260] is a one-pass learning approach, which only needs to scan each instance once and to deal with evolving streams. In the pre-training stage, OPID compresses important information of vanished features into functions of survived features, and in the adaptation stage, it is expanded to include the augmented features. 
ReForm [261] learns the meta-representation for each feature and based on which calculates the relationship between features in the meta-representation space. ReForm then bridges the feature space gap through optimal transport, which could be further used to transform classifiers with different features and classes. + +A major advantage of neural models is that they are easily fine-tuned in new domains and learn reusable features. For example, as the deep PTM has the ability to extract generalizable features for a tabular task, reusing the knowledge from the PTM can utilize the strategies designed for visual and language domains. In detail, we can fix most of the parameters in the PTM and tune the remaining parts which only have limited parameters, for example, the linear probing or parameter-efficient fine-tuning. + +Reuse PTM Pre-trained from One Dataset. These methods primarily focus on the difference between the pre-trained and down-streaming datasets. TabRet [72] utilizes masked autoencoding to make the transformer work in downstream tasks. To transfer pre-trained large language models to tabular tasks, ORCA [73] trains an embedder to align the source and target distributions. TabToken [64] focuses on improving the quality of the feature tokens, which are an important component in tabular deep models. TabToken leverages a conditional contrastive loss to improve the + +quality of learned embeddings and demonstrates enhanced transferability of deep learning models for tabular data. + +Pseudo-Feature method [222] utilizes pseudo-feature models individually for each new feature. In detail, given one additional feature in a downstream dataset, it first pretrains a model on the upstream data without that feature. Then Pseudo-Feature fine-tunes the pre-trained model on downstream data to predict values in the column absent from the upstream data. Next, the fine-tuned model is used back in the upstream datasets to predict and assign pseudo-values of this feature. 
After supplementing the upstream dataset with the "unseen" feature in the downstream task, PseudoFeature pre-trains and transfers the feature extractor to the downstream task again. This method is computationally expensive in our broader feature space adaptation scenario. Reuse PTM Pre-trained from Multiple Datasets. XTab [230] aims to enhance the transferability of the transformer. They address the challenge of inconsistent column types and quantities among tables by utilizing independent features and federated learning to pre-train the shared component. + +Another thread of method learns shared components such as attribute-agnostic transformation across datasets, which provides a good model initialization for partial parameters given a downstream task. [228] infers latent representations of each attribute and each response from a few labeled instances using an inference network. The attribute and response representations are enabled make predictions based on the task-specific properties of attributes and responses even when attribute and response sizes are different across tasks. DEN [229] uses a three-block architecture: a covariate transformation block followed by a distribution embedding block and then a classification block. They provide theoretical insights to show that this architecture allows the embedding and classification blocks to be fixed after pre-training on a diverse set of tasks. Meta-Transformer [231] leverages a frozen encoder to perform multimodal perception without any paired multimodal training data. In Meta-Transformer, the raw input data from various modalities are mapped into a shared space in meta learning [262], allowing a subsequent encoder with frozen parameters to extract high-level semantic features. + +# 6.3 Reusing a Pre-trained Language Model + +In some cases, the semantic meaning of features is available, making it natural to leverage pre-trained language models for tabular data. 
Typically, two types of semantic information can be derived from a tabular dataset $\mathcal{D}$ . First, attribute names for each of the $d$ features, $\mathcal{A} = A_{1},\ldots ,A_{d}$ , provide useful context. Additionally, meta-information such as a textual description, denoted as meta_description, can further enhance understanding. The learning process is then formulated as: + +$$ +\hat {y} _ {i} = f \left(\boldsymbol {x} _ {i}, \mathcal {A} \mid \mathcal {D}, \text {m e t a} _ {\text {d e s c r i p t}}\right) \tag {6} +$$ + +where the semantic information bridges the gap between feature spaces and facilitates knowledge transfer from pretrained tasks to downstream applications. + +Although pre-trained language models have demonstrated success in various domains, their application to tabular data remains limited due to the prevalence of numerical values and the scarcity of textual descriptions. + +![](images/886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg) +Figure 7: Illustration of transferable tabular methods with a language model. The language model can be applied at various stages, including feature tokenization, feature engineering, and textual serialization. + +Moreover, concerns about data privacy and security may further restrict access to semantic information. Consequently, language models are typically applied to tabular datasets only when textual context is sufficiently available. + +Language Models for Feature Tokenization. When the feature space changes, language-based methods assume that semantic relationships exist between feature descriptions and rely on large-scale language models to capture these connections. For example, the feature "occupation" in one task may share semantic similarity with the feature "organization" in another, allowing feature-label relationships to be reused across different datasets. 
By extracting feature embeddings (tokens), tables of varying sizes can be transformed into a standardized set of tokens in a shared space. A pre-trained transformer then encodes transferable knowledge, aiding the fine-tuning process for downstream tasks. + +TransTab [77] trains a tokenizer based on the words present in tabular data and incorporates both column descriptions and table cells as raw input to a gated transformer model. The model is pre-trained via self-supervised learning or supervised contrastive loss and is validated on tasks such as transfer learning and feature incremental learning. PTab [232] adopts a similar approach, learning contextual representations from multiple tokenized tabular datasets before fine-tuning for downstream tasks. UniTabE [182] encodes and fuses information from column names, data types, and cell values into a set of tokens, applying an encoder-decoder architecture with Transformer and LSTM components. It is pre-trained using Multi-Cell-Masking and contrastive learning, where a sub-vector of an instance is treated as a positive sample while other instances or their subsets are considered negatives. + +CM2 [79] introduces a cross-table pre-training framework that integrates attribute names and feature values. CM2 uses transformers to process feature tokens and employs a prompt-based Masked Table Modeling (pMTM) self-supervised objective, where column names act as prompts to assist in predicting masked features. TP-BERTa [78] follows a similar approach but incorporates numerical discretization strategies and magnitude tokenization for feature encoding, fine-tuning smaller pre-trained language models such as RoBERTa [263] for tabular data prediction. Its pre-training objective includes supervised loss and magnitude-aware triplet loss as a regularizer. 
+ +CARTE [233] utilizes a graph representation of tabular + +data to handle heterogeneous feature spaces, transforming textual information from column names and entries into embeddings. A graph-attentional network is then applied to contextualize entries with column names and neighboring entries. CARTE is pre-trained on the YAGO3 knowledge base [264] by constructing graphlets for tabular data and employing contrastive loss, where the original graphlet and one truncated variant are positives, while other graphlets in the batch serve as negatives. The pre-trained CARTE model is subsequently fine-tuned for downstream tasks. + +Language Models for Feature Engineering. Discriminative features enhance the effectiveness of subsequent tabular learning models. Binder [234] identifies task input components that are not directly answerable by a model and leverages LLMs to generate auxiliary features, particularly for knowledge grounding tasks. Given that discriminative features are often manually designed, CAAFE [265] explores the use of LLMs to generate auxiliary features based on task and feature semantics. The quality of these features is then evaluated using a general tabular model, TabPFN [89]. FeatLLM [266] enhances feature generation by incorporating example-based prompting, enabling LLMs to create new features based on textual descriptions. TaPTaP [235] is expected to capture a generic tabular data distribution after ongoing pre-training on a large-scale corpus of real-world tabular data, generating high-quality synthetic tables to support various applications on tabular data. + +Language Models for Textual Serialization. A direct approach to incorporating pre-trained language models involves converting tabular data into a textual format, allowing LLMs to infer relationships between features and labels based on embedded expert knowledge. This concept has been validated in semantic parsing tasks [267], [268]. 
LIFT [236] and TabLLM [80] serialize tabular data by integrating feature names into text and combining them with task descriptions. This enables LLMs to treat tabular prediction tasks as text generation problems. LIFT fine-tunes models on the entire training set, while TabLLM employs few-shot learning for fine-tuning. UniPredict [237] constructs prompts using metadata, sample serialization, and task instructions, fine-tuning LLMs with confidence-weighted augmented labels predicted by an external model. The approach is validated on multiple in-distribution datasets. + +Despite their advantages, textual serialization methods face challenges when the number of features increases, as prompts may become too large to fit within the model's context window. The effectiveness of LLMs in tabular data tasks remains constrained by the availability of semantic information and the capabilities of external tabular models. Further exploration of LLM-based methods will be discussed in the general tabular models in Section 7. + +# 6.4 Reusing a Pre-trained Vision Model + +Given the success of deep neural networks (DNNs) in visual tasks, it is intuitive to leverage the strong recognition capabilities of pre-trained vision models for tabular data. Additionally, data augmentation strategies commonly used in image processing can be introduced after transforming tabular data into a visual format. Similar ideas have been explored in time series forecasting [269] and irregular time series classification [270]. + +![](images/1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg) +Figure 8: Illustration of transferable tabular methods with a vision model. Tabular data can be transformed into images through dimensionality reduction, table reorganization, and the use of image markers. + +The primary challenge lies in representing tabular instances in an image-compatible format. 
In natural images, neighboring pixels often share semantic relationships, whereas tabular data lacks inherent spatial structure. Features in a tabular instance are permutation-invariant, meaning that exchanging their order does not alter the instance's meaning. Various methods have been proposed to transform tabular data into visual representations, enabling the application of pre-trained vision models fine-tuned for tabular tasks. This subsection highlights different transformation strategies that transfer tabular datasets into images. + +Dimensionality Reduction Transformation. Visualization strategies for tabular data naturally convert tables into images by embedding high-dimensional features into a lower-dimensional space. DeepInsight [238] projects tabular data into a 2D space using t-SNE and constructs images through convex hull analysis, applying translation, rotation, quantization, and normalization. REFINED [239] employs Bayesian Metric Multidimensional Scaling to preserve pairwise distances within the low-dimensional representation, ensuring that structurally similar features remain proximate in the transformed image. + +Table Reorganization Transformation. A tabular dataset $\mathcal{D}$ can be treated as a matrix and represented as a single-channel image or kernel. To enable visual PTMs to recognize meaningful spatial relationships, different strategies have been developed for structuring tabular data into images. Tabular Convolution (TAC) [240] arranges data samples into zero-mean square matrices (kernels) of odd integer dimensions. These kernels are then convolved with a fixed "base image," and the resulting images are subsequently fed to a CNN for classification. Image Generator for Tabular Data (IGTD) [74] and TabEye [75] share a similar idea, generating an image for each data sample where pixel intensities correspond directly to feature values. 
These methods prioritize placing similar features in close proximity but struggle with high-dimensional tabular tasks. LM-IGTD [241] extends IGTD by incorporating stochastic feature generation to enhance robustness and generalization. + +Image Marker Transformation. Another approach involves encoding feature values as visual markers within an image. Super-TML [242] assigns feature values to predetermined positions within an image, effectively handling categorical and numerical datasets. Tab2Visual [76] normalizes tabular data and represents each instance as a row of multiple bars, each corresponding to a specific value. Each feature + +is assigned a unique color to enhance visual differentiation, while bar widths are proportional to feature magnitudes. + +By transforming tabular data into images, these methods enable the application of powerful pre-trained vision models to tabular prediction tasks, leveraging established deep learning techniques from the vision domain to enhance tabular model performance. + +# 7 FROM TRANSFERABLE TO GENERAL MODEL + +The general model (also referred to as the tabular foundation model) represents an advancement over the transferable model. It extends the generalization capabilities of a pretrained tabular model to a variety of heterogeneous downstream tabular tasks, regardless of their diverse feature and class spaces, without requiring additional fine-tuning. In other words, given a pre-trained model $g_{\Theta}$ , it can be directly applied to a downstream tabular task $\mathcal{D}$ to predict the label of a test instance $x^{*}$ as follows: + +$$ +\hat {y} ^ {*} = g _ {\Theta} \left(\boldsymbol {x} ^ {*} \mid \mathcal {D}\right). \tag {7} +$$ + +Thus, the general model shares similarities with the transferable tabular model, but with a greater emphasis on the "zero-shot" ability, aims to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. 
Importantly, it does not require an Adapt function, which further reduces the computational cost of hyper-parameter tuning. The goal of the general tabular model is to achieve better generalization on downstream tabular datasets $\mathcal{D}$ when compared to alternative strategies, such as training a tabular model directly on $\mathcal{D}$ or adapting a transferable model. + +Remark 6. Distinguishing between an advanced transferable tabular model, pre-trained on a wide range of heterogeneous tabular tasks, and the general tabular model can be challenging. Some transferable tabular models, based on auxiliary feature semantics, are able to predict labels for downstream test instances directly [80]. However, their prediction ability is constrained and typically applicable only in specific areas after fine-tuning [78], [233]. The general tabular model, on the other hand, is designed to handle a wider range of heterogeneous tabular tasks, sharing similar pre-training challenges with transferable models but without utilizing additional semantics. Fine-tuning a pre-trained general model is also an option for further performance improvements [93], [96]. + +Pre-training has revolutionized domains such as vision and language [271], [84], but its adoption in tabular data remains limited due to the inherent heterogeneity of tabular datasets. Tabular datasets can vary significantly in both dimensionality (i.e., the number of columns) and the semantic meaning of each dimension, even within the same application. For example, different healthcare datasets may capture varying levels of detail and aspects of patient information. Even within the same feature entry (e.g., the $d$ -th column), the meaning can vary (e.g., "age" vs. "height"). 
This contrasts with vision and text data (within the same language), where different data sources typically share the same "vocabulary" (e.g., pixels, patches, or sub-words) and similar relationships between vocabulary "elements" (e.g., neighboring pixels + +![](images/2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg) +Figure 9: Illustration of general methods. These methods handle inherent heterogeneity by improving the model's adaptability or homogenizing the diverse tabular formats. Once pre-trained, they can be directly applied to downstream tasks without fine-tuning. + +![](images/0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg) + +often share colors). The lack of shared vocabulary and relationships in tabular data makes it challenging to jointly train a model across multiple datasets, let alone apply a pre-trained model directly to new downstream tasks. + +There are two main strategies to address the inherent heterogeneity in tabular datasets: improving the model's adaptability or homogenizing the diverse tabular formats. We categorize general tabular models into three parts based on their strategies for achieving generalizability. The first focuses on raw-feature-based approaches, among which TabPFN variants represent a rapidly evolving branch and are thus discussed separately. The third category encompasses semantic-based methods that leverage attribute and task semantics to unify heterogeneous tasks. + +# 7.1 Raw-Feature-based General Models + +To adapt a general tabular model to heterogeneous tabular datasets during the pre-training and fine-tuning stages, two main strategies can be used from the data-centric and model-centric perspectives. From the data-centric perspective, the general model may standardize tabular datasets into a homogeneous form. For instance, TabPTM [86] transforms all datasets into a uniform format using meta-representation to enable pre-training. 
The pre-trained model can then be applied directly to a downstream dataset or fine-tuned without introducing additional parameters. + +Alternatively, from the model-centric perspective, the general model may improve adaptability by tailoring it to specific tabular tasks. HyperFast [87] adopts the concept of a Hyper Network [272] in meta-learning [273], where a mapping from the tabular dataset to the weights of a classifier is learned. This mapping can then be used to predict labels for unseen instances from the task. To address datasets with varying dimensions, HyperFast projects datasets into a fixed size using random projections. To overcome the slow weight generation speed, MotherNet accelerates HyperFast by modifying its architecture with Transformer-like modules [88]. + +# 7.2 TabPFN Variants + +The TabPFN family of models [89], [91] leverages the incontext learning capabilities of transformers, directly predicting labels by adapting test instances according to the context of training examples. In the first version of TabPFN, an instance $\boldsymbol{x}_i$ is padded to a fixed dimension (e.g., 100), and the features are projected to a higher dimension (e.g., $d'$ ) for further processing. The label $y_i$ is processed similarly and + +added to the instance embeddings. The embeddings of all $N + 1$ instances, including training and test instances, are formulated into a set of $N + 1$ tokens with $d'$ dimensions. These tokens are processed through several layers of a Transformer, and the output token corresponding to the test instance is further predicted using a 10-way classifier. TabPFN is pretrained over synthetically generated datasets with structured causal models (SCM) [274] and Bayesian Neural Networks (BNNs) [275], [276], enabling the strong in-context learning ability, with the best checkpoint selected based on some real-world datasets. 
Due to the high complexity of transformers, TabPFN is limited to small-scale tasks, with suggested sizes of $N < 1000$ , $d < 100$ , and $C < 10$ . + +TabPFN v2 introduces a specialized feature tokenizer to better handle heterogeneity. Specifically, each cell in the table is projected to a $k$ -dimensional vector using a shared mapping, and random position encoding vectors are added to differentiate features [277]. This results in a tensor of size $(N + 1) \times (d + 1) \times k$ when there is a single test instance. The label of each instance is processed similarly, and the mapped $k$ -dimensional token is concatenated with the instance tokens. A dummy label (e.g., the average of all labels) is used for the test instance since its label is unknown. A two-way attention mechanism is used, with each feature attending to the other features in its row and then attending to the same feature across its column [278]. The output token corresponding to the label of the test instance is further mapped to a 10-class classifier or regressor. Several improvements have been made in TabPFN v2, including increased context size ( $N < 10000$ , $d < 500$ ), automatic feature engineering, and post-hoc ensemble methods. [279] analyzes TabPFN from a bias-variance perspective, shedding light on its generalization capabilities. Various applications have also been explored, including tabular data generation [280], anomaly detection [281], data augmentation [282], and time series forecasting [283]. + +The improvements of TabPFN (especially TabPFN v1) stem from several aspects. + +Pre-training Improvements. TabForestPFN [284] extends TabPFN by pre-training In-Context Learning (ICL)-transformers on a new forest dataset generator that creates unrealistic datasets with complex decision boundaries. TabDPT [179] pre-trains the architecture on real-world datasets using self-supervised learning and retrieval objectives, making it suitable for both classification and regression tasks. 
APT [285] is pre-trained utilizing adversarial synthetic data generated by adaptive agents, which systematically modify the underlying data-generating distribution and deliberately challenge the model with diverse synthetic datasets to enhance its robustness and generalization capabilities. TabICL [286] integrates tree-based SCMs using XGBoost [130] to model complex interactions and employs curriculum learning by progressively increasing synthetic dataset sizes. Scalable Improvements. The efficiency of TabPFN is highly sensitive to context size, prompting strategies to enhance scalability and performance [39]. These include compressing training data into a compact learned representation using sketching [287] or prompt tuning techniques [288], [289], + +1. Some variants of TabPFN are not considered general tabular models, especially the latter parts, as they require additional fine-tuning steps. We place them in this subsection due to their strong relationship with TabPFN. + +employing adaptive data selection methods to identify the most pertinent training examples for each test instance [290], [90], [179], [291], and replacing traditional quadratic attention with computationally efficient linear attention mechanisms [292] and state-space models (SSMs) [293]. + +Adaptation Improvements. Some approaches improve TabPFN's performance on downstream tasks by adapting the context [90] or fine-tuning specific parts of the model [96], [284], [290], [289]. TabICL [286] employs a column-then-row attention mechanism to construct fixed-dimensional embeddings of rows, which are subsequently processed by a transformer like TabPFN v1 to facilitate efficient in-context learning. EquiTabPFN [294] introduces self-attention across target components, ensuring that the arbitrary ordering of target dimensions does not influence model predictions, enhancing the performance of TabPFN v1 to some extent. 
+ +# 7.3 Semantics-based General Models + +By leveraging the semantic structure of tabular data, such as column names, heterogeneous tasks can be projected into a shared language space. This allows a single language model, pre-trained on diverse tabular datasets, to handle unseen tasks in a unified manner. TabuLa-8B [92] fine-tunes a Llama 3-8B LLM for tabular data prediction (classification and binned regression) using a novel packing and attention scheme for tabular prediction. GTL [93] transforms tabular datasets into an instruction-oriented language format, facilitating the continued pre-training of LLMs on instruction-oriented tabular data, which demonstrates strong performance in few-shot scenarios. GTL-S [295] unlocks the potential of GTL from a scaling perspective, revealing that scaling datasets and prediction tasks enhance generalization. [94] extends GTL by incorporating retrieval-augmented LLMs for tabular data, combined with retrieval-guided instruction-tuning for LLMs. MediTab [243] uses a data engine that leverages LLMs to consolidate tabular samples to overcome the barrier across tables with distinct schema. MediTab aligns out-domain data with the target task using a "learn, annotate, and refinement" pipeline, enabling the pre-trained model to infer for arbitrary tabular input in the domain without fine-tuning. + +# 8 TABULAR ENSEMBLE METHODS + +Ensemble learning is a natural way to improve the generalization ability of multiple base learners by leveraging their diversity. Classical methods such as Random Forest [127] and AdaBoost [126], [296] employ bagging and boosting, respectively, by ensembling multiple decision trees. These methods have proven effective for tabular data, as they reduce bias/variance and improve robustness [297]. 
+ +In deep tabular learning, ensemble methods can be categorized into two primary approaches: joint-training ensembles, where multiple sub-networks are aggregated within a single training pipeline, and post-hoc ensembles, where the predictions from multiple pre-trained deep tabular models are fused. One major challenge in ensembling deep tabular methods is computational efficiency, as training multiple deep models or sub-models can be computationally expensive and time-consuming. + +# 8.1 Joint-Training Ensembles + +Joint-training ensemble methods integrate diverse model architectures within a single training process to improve predictive performance while maintaining efficiency. These architectures often combine different types of models, such as linear and non-linear models [28] or tree-based and deep neural network-based approaches [63]. Tree-mimic methods leverage this concept by mixing predictions from multiple tree nodes to enhance robustness [60], [59], [193]. + +To improve efficiency while maintaining predictive power, various techniques have been explored. Some approaches employ parameter-efficient ensembles, such as TabM [176], which uses MLPs as base learners and incorporates BatchEnsemble [298] to generate multiple diverse base learners efficiently. This prevents a large increase in the number of learnable parameters while maintaining model diversity. Similarly, BETA leverages pre-trained TabPFN by generating multiple base learners through additional parameter tuning [96]. Specifically, BETA learns multiple feature projections, feeding the projected training sets into TabPFN and aggregating the results while applying BatchEnsemble to reduce the number of additional learnable parameters. + +Some hybrid approaches, such as LLM-Boost and PFN-Boost, have been developed to integrate large language models and TabPFN with gradient-boosted decision trees [299]. 
In these approaches, LLMs and PFN serve as the initial base learners, and additional base learners are sequentially trained in a boosting manner. This approach leverages the strong prior knowledge from LLMs and TabPFN while maintaining the scalability of gradient-boosted decision trees. + +# 8.2 Post-Hoc Ensembles + +Post-hoc ensemble (PHE) methods involve combining multiple trained models to improve robustness and accuracy. Bagging-based ensembles are one of the most direct post-hoc strategies, where usually multiple models trained with different random seeds are aggregated [33], [69]. Although this approach improves model robustness, it incurs high computational overhead. Some recent studies have demonstrated that LLM-based methods exhibit diverse prediction behaviors compared to deep tabular models that do not utilize attribute names [94]. This difference in prediction styles enhances their complementarity, making them ideal candidates for ensemble methods. + +Instead of explicitly training multiple models, perturbation-based approaches create diverse predictions from the same pre-trained model. One such method applies feature permutation with TabPFN, leveraging the fact that TabPFN is not fully feature permutation-invariant [89]. A perturbation-based ensemble can be formed by randomly permuting the feature order in both the training and test sets and making predictions multiple times, generating multiple diverse predictors without additional training costs. TabPFN v2 introduces additional perturbations to enhance diversity among several key factors, including variations in feature encoding, feature quantization, categorical feature shuffling, SVD-based feature compression, outlier removal, and power transformations such as the Yeo-Johnson transformation [91]. These randomly selected transformations create diverse + +prediction patterns, enabling effective ensemble learning without requiring multiple separately trained models. 
+ +Another post-hoc ensemble strategy employed in TabPFN v2 is the use of Portfolio-Based Ensemble, where a fixed set of TabPFN configurations is used [91]. A greedy ensemble selection technique is then applied to learn optimal weights for aggregating the predictions of different configurations [300]. By combining multiple perturbed models, this method improves generalization without excessive training costs. Some methods apply ensemble techniques to TabPFN v1 to handle large datasets. For instance, TabPFN-Bagging [96], [301] divides large datasets into multiple context groups, with the final results averaged to mitigate variance. BoostPFN [301] treats TabPFN v1 as weak learners, where each weak learner uses a subset of the training data as context. This approach allows BoostPFN to outperform standard Prior Fitted Networks (PFNs) on large datasets. + +# 9 EXTENSIONS + +In this section, we briefly introduce some extensions on deep tabular methods across different complex tasks. + +Clustering. Traditional clustering approaches often leverage enhanced distance metrics, such as the Gower distance [302], which is specifically designed for mixed data types, and interpretable prototypes, such as K-medoids. Recent advances in tabular data clustering have sought to integrate interpretability constraints with deep representation learning. For example, IDC [97] introduces a deep learning framework for general tabular data that predicts interpretable cluster assignments at both the instance and cluster levels. To address overlapping clusters, TableDC [98] integrates the Mahalanobis distance, which accounts for variance and correlation within the data. This method provides a similarity measure suitable for tables, rows, or columns in high-dimensional latent spaces. + +Anomaly Detection. Anomaly detection in tabular data is crucial for identifying subtle irregularities in structured datasets, such as fraudulent transactions or equipment failures. 
While classical techniques like Isolation Forest [303] and Local Outlier Factor [304] remain foundational, recent developments have incorporated various methods to capture contextual relationships in high-dimensional data. For instance, [305] introduces a method that learns mappings that maximize mutual information between each sample and the part that is masked out, capturing the structural nuances of samples from a single training class. ADBench [99] provides a comprehensive tabular anomaly detection benchmark with 30 algorithms and 57 benchmark datasets. Additionally, large language models (LLMs) have also been employed for anomaly detection in tabular data [306]. + +Tabular Generation. Tabular data generation has become an essential tool for synthetic data creation, privacy preservation, and addressing data scarcity. Traditional methods, such as Bayesian networks or GANs, focus on mimicking marginal distributions, while recent advancements emphasize preserving complex feature dependencies and semantic consistency. For instance, tabular diffusion models [307] iteratively refine synthetic data to capture subtle correlations in high-dimensional datasets, outperforming GANs in terms of data + +fidelity. [308] introduces high-order structural causal information as a natural prior knowledge and offers a benchmark framework for evaluating tabular synthesis models. Despite these advances, challenges remain in balancing realism with privacy, such as avoiding identity leakage in sensitive datasets, and scaling to heterogeneous data types. Hybrid neuro-symbolic models [309] bridge this gap to provide trustworthy synthetic data for downstream tasks. + +Interpretability. Traditional gradient-boosted decision trees (GBDTs) inherently provide interpretability through feature importance scores and decision path visualization. Frameworks such as XGBoost [130] and LightGBM [131] quantify feature importance using metrics like split frequency and information gain. 
SHAP values [310] enable instance-level explanations by decomposing model predictions into feature contributions. The additive nature of GBDTs allows for partial dependence plots [311] to visualize feature effects while controlling for interactions. NeC4.5 [253], a novel decision tree algorithm that integrates the comprehensibility of decision trees with the generalization ability of neural network ensembles. By training a neural network ensemble to generate a new training set, NeC4.5 enhances decision tree performance while maintaining interpretability. + +Recent deep models specifically designed for tabular data have introduced novel interpretability mechanisms. For example, NAMs [255] combine some of the expressivity of DNNs with the inherent intelligibility of generalized additive models. They learn a linear combination of neural networks that each attend to a single input feature, which are trained jointly and can learn arbitrarily complex relationships between their input feature and the output. TabNet [105] uses sequential attention with learnable feature masks, where each decision step explicitly selects a subset of features via sparse masking. The aggregated feature usage across steps provides global interpretability comparable to GBDT's feature importance. Subsequent variants, such as TabTransformer [63], enhance interpretability by visualizing cross-feature attention patterns. FT-Transformer [33] combines feature tokenization with explainable attention, while NODE [60], NODE-GAM [61] and DOFEN [312] generalize ensembles of oblivious decision trees, benefiting from both end-to-end gradient-based optimization and multi-layer hierarchical representation learning. + +Open-Environment Tabular Machine Learning. Research on distribution shifts in tabular data starts with domain-to-domain shifts [110], which are commonly categorized based on the availability of target domain data. 
When target data is available, transfer learning techniques such as unsupervised domain adaptation [313] and test-time adaptation [314] are widely used. These methods adapt model parameters using test-time inputs but rely on access to target distributions, which may not always be feasible. In contrast, when target data is unavailable, a more practical but challenging scenario, methods aiming to enhance robustness and generalization, using approaches such as domain generalization [315], domain robustness [316], [317], label robustness [318] or ensemble strategies [95]. TableShift [110] provides a detailed analysis of this scenario. + +Beyond domain-to-domain shifts, temporal shifts are more general and complex. TabReD [109] emphasizes the inherent temporality of real-world tabular data, advocating + +for temporal splits for training and testing. [319] further propose a refined training protocol focusing on temporal evaluation, significantly improving generalization across models. To address temporal shifts, it's critical to incorporate temporal information [319]. Drift-Resilient TabPFN [174] models temporal shifts with a secondary SCM, which specifies changes in the primary model parameters. [319] introduce a plug-and-play temporal embedding that effectively captures trend and periodicity patterns, providing an adaptive mechanism to mitigate the impact of temporal shifts. Under temporal shift conditions, most methods experience performance degradation, while TabM [95] exhibits relative robustness [109]. However, [319] demonstrate that with the refined training protocol and temporal embedding, methods such as ModernNCA [35] can regain competitiveness. + +Multi-modal Learning with Tabular Data. Text, such as feature names, can be effectively utilized to enhance tabular data learning, as discussed in Section 6. 
Here, we focus on interactions with the image modality, e.g., in healthcare, where medical images require specialized equipment and expert knowledge, often in tabular form, for accurate diagnosis [320]. To tackle challenges like large medical datasets and high annotation costs, MMCL [106] uses a contrastive self-supervised learning framework that integrates images and tabular data. CHARMS [107] transfers expert knowledge from tabular data to images, improving image predictions even without tabular data during inference, thus reducing reliance on costly expert annotations. TIP [321] proposes a self-supervised learning strategy with a tabular encoder for incomplete, heterogeneous data and a multimodal interaction module for inter-modality representation learning. + +Tabular Understanding. Tabular understanding involves comprehending the information contained within a table and can be broken down into several tasks. For example, Table Detection (TD) [322], [323] refers to identifying the region of the image that contains the table while Table Structure Recognition (TSR) [324], [325] involves the identification of the rows and columns to identify individual table cells, which aims to recognize the cellular structures of tables from table images by extracting the coordinates of cell boxes and row/column spanning information. Table Question Answering (TQA) [326], [327], [112] refers to providing precise answers from tables to answer a user's question. Traditional methods, whether OCR-based [328], [329], [330] or OCR-free [331], [332], [333], [334], [335], have made significant strides in TSR and TD, which are relatively simpler tasks. + +More complex tasks, such as TQA, have also been the focus of considerable effort. For example, Donut [332] proposes a novel task and a synthetic document image generator to pre-train the model, reducing reliance on large-scale real document images. 
Monkey and TextMonkey [336], [337] utilize shifted window attention and use similarity measures to filter out redundant tokens. mPLUG-DocOwl [338] adapts mPLUG-Owl for OCR-free document understanding, while TabPedia [335] constructs low- and high-resolution vision encoders with a concept synergy mechanism for visual table understanding. [339] focuses on exploring various table representations and directly prompting LLMs to improve performance. Please refer to [112], [113] for more details. + +# 10 DISCUSSIONS + +In this section, we discuss several possible future directions for tabular machine learning, particularly in light of the significant potential demonstrated by tabular general/foundation models. + +The Ability to Handle Dynamic and Open Environments. Tabular models, particularly foundation models, will increasingly need to operate in dynamic, real-world environments where data evolves over time [340]. One of the key challenges is dealing with imbalanced datasets [155], where certain classes may be underrepresented, and the distribution of data may shift over time [110]. As a result, models need to adapt to these changes and continue providing accurate predictions. Additionally, the emergence of new classes in the data may require the model to evolve and update its predictions in real-time [341]. This calls for methods that ensure tabular foundation models can accommodate evolving data, handling both new classes and changing distributions effectively. + +The Coverage and Scope of Tabular Foundation Models. Current tabular foundation models have demonstrated strong performance on various unseen classification and regression tasks. However, several important questions remain about their capabilities. For instance, in addition to in-context learning [246], are there other prediction strategies that could be employed to further enhance the versatility and performance of tabular foundation models? 
Beyond classification and regression, can these models be extended to handle related tasks such as clustering, imputation, outlier detection, or even table-based question answering (QA)? Expanding the task scope could increase the model's utility in a wide range of applications. Furthermore, it is worth investigating whether there is a scaling law [342] for tabular foundation models. Currently, tabular checkpoints are relatively small compared to foundation models in other modalities, such as language or vision. Understanding the implications of scaling these models—particularly the trade-offs between model size and performance—will be crucial for their future development. + +Will Foundation Models Always Help? While foundation models have demonstrated impressive generalization abilities, there are inherent trade-offs. Similar to ensemble learning, a single foundation model may provide an "average" predictive ability across tasks, potentially losing specialized expertise for specific tasks. To address this, a promising approach could be the development of a "tabular model zoo" [343], [344]. In this paradigm, different pre-trained models, potentially including models from other domains, could be combined for a specific tabular task. Given a new task, suitable pre-trained models could be selected, adapted if necessary, and integrated for improved performance. + +Model Efficiency. In many real-world applications, tabular datasets are large and high-dimensional, posing significant challenges for both training and inference [345], [44]. One area of concern is how to handle extreme cases, such as when the data is exceptionally large or sparse. Foundation models should be able to scale effectively in these scenarios without sacrificing performance. Another issue is inference speed. In large-scale problems, timely predictions are essential, especially when deployed in real-time environments [292]. 
Optimizing the inference process is therefore critical to ensure that predictions can be made quickly on large, complex datasets. Lastly, the computational resources required for training and deploying foundation models can be substantial [346]. Optimizing resource usage through methods such as model pruning, quantization, and efficient training algorithms will be important to ensure that these models remain practical and accessible for a wide range of applications. +

Bridging the Gap Between Tabular Data and Other Modalities. Tabular data often coexists with other data modalities, such as images and text. One of the exciting challenges in the field is how to effectively integrate tabular data with foundation models from other domains [347]. Combining the strengths of tabular models with those of vision or language models could result in more powerful and versatile models capable of handling multimodal data. Exploring how to seamlessly integrate these modalities—whether through joint embeddings, cross-modal attention mechanisms, or other techniques—could unlock significant advances in tasks that require both structured tabular data and unstructured data sources like images or text. +

# 11 CONCLUSION +

Tabular data remains a cornerstone of real-world machine learning applications, and the advancement of deep learning has opened new possibilities for effective representation learning in this domain. In this survey, we present a comprehensive overview of deep tabular representation learning, covering its background, challenges, evaluation benchmarks, and the discussion between tree-based models and DNNs. We systematically categorize existing methods into three categories—specialized, transferable, and general models—based on their generalization capabilities. In addition, we discuss ensemble techniques, extensions, and some promising future directions, such as open-environment and multimodal tabular learning. 
We hope this survey serves as a valuable reference for understanding the current state of the field and inspires further progress in developing more robust and generalizable tabular learning methods. + +# REFERENCES + +[1] B. Kovalerchuk and E. Vityaev, Data mining in finance: advances in relational and hybrid methods. Springer Science & Business Media, 2005. 1 +[2] S. L. Hyland, M. Faltys, M. Hüser, X. Lyu, T. Gumbsch, C. Esteban, C. Bock, M. Horn, M. Moor, B. Rieck et al., "Early prediction of circulatory failure in the intensive care unit using machine learning," Nature medicine, vol. 26, no. 3, pp. 364-373, 2020. 1 +[3] C. Romero and S. Ventura, "Educational data mining: a review of the state of the art," IEEE Transactions on Systems, Man, and Cybernetics, vol. 40, no. 6, pp. 601-618, 2010. 1 +[4] X. Amatriain, A. Jaimes, N. Oliver, and J. M. Pujol, "Data mining methods for recommender systems," in Recommender systems handbook. Springer, 2010, pp. 39-71. 1 +[5] R. Tibshirani, T. Hastie, B. Narasimhan, and G. Chu, "Diagnosis of multiple cancer types by shrunken centroids of gene expression," Proceedings of the National Academy of Sciences, vol. 99, no. 10, pp. 6567-6572, 2002. 1, 4 +[6] O. Ivanciuc et al., "Applications of support vector machines in chemistry," Reviews in computational chemistry, vol. 23, p. 291, 2007. 1 +[7] N. K. Ahmed, A. F. Atiya, N. E. Gayar, and H. El-Shishiny, "An empirical comparison of machine learning models for time series forecasting," Econometric reviews, vol. 29, no. 5-6, pp. 594-621, 2010. + +[8] M. R. Allen and D. A. Stainforth, "Towards objective probabilistic climate forecasting," Nature, vol. 419, no. 6903, pp. 228-228, 2002. 1 +[9] V. Borisov, T. Leemann, K. Seßler, J. Haug, M. Pawelczyk, and G. Kasneci, "Deep neural networks and tabular data: A survey," IEEE Transactions Neural Networks and Learning Systems, vol. 35, no. 6, pp. 7499-7519, 2024. 1, 4, 7, 8 +[10] C. C. Aggarwal, Data Mining - The Textbook. Springer, 2015. 
1 +[11] Z. Ji, Z. C. Lipton, and C. Elkan, "Differential privacy and machine learning: a survey and review," CoRR, vol. abs/1412.7584, 2014. 1 +[12] M. F. Delgado, E. Cernadas, S. Barro, and D. G. Amorim, "Do we need hundreds of classifiers to solve real world classification problems?" Journal of Machine Learning Research, vol. 15, no. 1, pp. 3133-3181, 2014. 1, 5, 6 +[13] C. Bishop, Pattern recognition and machine learning. Springer, 2006. 1 +[14] T. Hastie, R. Tibshirani, and J. H. Friedman, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, 2nd Edition. Springer, 2009. 1, 4 +[15] M. Mohri, A. Rostamizadeh, and A. Talwalkar, Foundations of Machine Learning. MIT Press, 2012. 1 +[16] K. P. Murphy, Probabilistic Machine Learning: An introduction, ser. Adaptive computation and machine learning series. MIT Press, 2022. 1 +[17] A. Voulodimos, N. Doulamis, A. Doulamis, E. Protopapadakis et al., "Deep learning for computer vision: A brief review," Computational intelligence and neuroscience, vol. 2018, 2018. 1 +[18] D. W. Otter, J. R. Medina, and J. K. Kalita, "A survey of the usages of deep learning for natural language processing," IEEE transactions on neural networks and learning systems, vol. 32, no. 2, pp. 604-624, 2020. 1 +[19] Y. Bengio, A. Courville, and P. Vincent, "Representation learning: A review and new perspectives," IEEE transactions on pattern analysis and machine intelligence, vol. 35, no. 8, pp. 1798-1828, 2013. 1 +[20] Y. LeCun, Y. Bengio, and G. Hinton, "Deep learning," nature, vol. 521, no. 7553, pp. 436-444, 2015. 1 +[21] I. Goodfellow, Y. Bengio, and A. Courville, Deep learning. MIT press, 2016. 1 +[22] J. Donahue, Y. Jia, O. Vinyals, J. Hoffman, N. Zhang, E. Tzeng, and T. Darrell, "Decaf: A deep convolutional activation feature for generic visual recognition," in ICML, 2014, pp. 647-655. 1 +[23] G. E. Hinton and R. R. Salakhutdinov, "Reducing the dimensionality of data with neural networks," science, vol. 313, no. 
5786, pp. 504-507, 2006. 2, 4 +[24] J. Weston, F. Ratle, and R. Collobert, "Deep learning via semi-supervised embedding," in ICML, 2008, pp. 1168-1175. 2, 4 +[25] L. Van Der Maaten, "Learning a parametric embedding by preserving local structure," in AISTATS, 2009, pp. 384-391. 2, 4 +[26] M. R. Min, L. Maaten, Z. Yuan, A. J. Bonner, and Z. Zhang, "Deep supervised t-distributed embedding," in ICML, 2010, pp. 791-798. 2, 4 +[27] W. Zhang, T. Du, and J. Wang, "Deep learning over multi-field categorical data -- A case study on user response prediction," in ECIR, 2016, pp. 45-57. 2, 4 +[28] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah, "Wide & deep learning for recommender systems," in DLRS, 2016, pp. 7-10. 2, 4, 19 +[29] K. G. Mehrotra, C. K. Mohan, H. Huang, K. G. Mehrotra, C. K. Mohan, and H. Huang, Anomaly detection. Springer, 2017. 2, 4 +[30] F. O. Isinkaye, Y. O. Folajimi, and B. A. Ojokoh, "Recommendation systems: Principles, methods and evaluation," Egyptian informatics journal, vol. 16, no. 3, pp. 261-273, 2015. 2, 4 +[31] S. S. Rangapuram, M. W. Seeger, J. Gasthaus, L. Stella, Y. Wang, and T. Januschowski, "Deep state space models for time series forecasting," in NeurIPS, 2018, pp. 7796-7805. 2, 4 +[32] B. Lim and S. Zohren, "Time-series forecasting with deep learning: a survey," Philosophical Transactions of the Royal Society A, vol. 379, no. 2194, p. 20200209, 2021. 2, 4 +[33] Y. Gorishniy, I. Rubachev, V. Khrulkov, and A. Babenko, "Revisiting deep learning models for tabular data," in NeurIPS, 2021, pp. 18932-18943. 2, 3, 4, 6, 7, 8, 9, 11, 12, 19, 20 +[34] D. Holzmüller, L. Grinsztajn, and I. Steinwart, "Better by default: Strong pre-tuned mlp's and boosted trees on tabular data," in NeurIPS, 2024, pp. 26577-26658. 2, 4, 5, 7, 9, 12 + +[35] H.-J. Ye, H.-H. Yin, D.-C. Zhan, and W.-L. 
Chao, "Revisiting nearest neighbor for tabular data: A deep tabular baseline two decades later," in ICLR, 2025. 2, 3, 4, 9, 10, 21 +[36] L. Grinsztajn, E. Oyallon, and G. Varoquaux, "Why do tree-based models still outperform deep learning on typical tabular data?" in NeurIPS, 2022, pp. 507-520. 2, 5, 6, 7, 8 +[37] R. Shwartz-Ziv and A. Armon, "Tabular data: Deep learning is not all you need," Information Fusion, vol. 81, pp. 84-90, 2022. 2 +[38] E. Beyazit, J. Kozaczuk, B. Li, V. Wallace, and B. Fadlallah, "An inductive bias for tabular deep learning," in NeurIPS, 2023, pp. 43108-43135. 2, 7, 11 +[39] D. C. McElfresh, S. Khandagale, J. Valverde, V. P. C., G. Ramakrishnan, M. Goldblum, and C. White, "When do neural nets outperform boosted trees on tabular data?" in NeurIPS, 2023, pp. 76336-76369. 2, 5, 6, 7, 8, 18 +[40] H.-J. Ye, D.-C. Zhan, N. Li, and Y. Jiang, "Learning multiple local metrics: Global consideration helps," IEEE transactions on pattern analysis and machine intelligence, vol. 42, no. 7, pp. 1698-1712, 2019. 2 +[41] S. M. Jesus, J. Pombal, D. Alves, A. F. Cruz, P. Saleiro, R. P. Ribeiro, J. Gama, and P. Bizarro, "Turning the tables: Biased, imbalanced, dynamic tabular datasets for ML evaluation," in NeurIPS, 2022, pp. 33563-33575. 2, 5 +[42] R. Kohli, M. Feurer, K. Eggensperger, B. Bischl, and F. Hutter, "Towards quantifying the effect of datasets for benchmarking: A look at tabular machine learning," in ICLR Workshop, 2024. 2, 6 +[43] A. Tschalzev, S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, "A data-centric perspective on evaluating machine learning models for tabular data," in NeurIPS Datasets and Benchmarks Track, 2024. 2, 6, 8 +[44] H.-J. Ye, S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and D.-C. Zhan, "A closer look at deep learning on tabular data," CoRR, vol. abs/2407.00956, 2024. 2, 6, 7, 8, 21 +[45] Y. Gorishniy, I. Rubachev, and A. Babenko, "On embeddings for numerical features in tabular deep learning," in NeurIPS, 2022, pp. 24991-25004. 
2, 4, 8, 9, 11 +[46] T. Ucar, E. Hajiramezanali, and L. Edwards, "Subtab: Subsetting features of tabular data for self-supervised representation learning," in NeurIPS, 2021, pp. 18853-18865. 2, 9, 14 +[47] D. Bahri, H. Jiang, Y. Tay, and D. Metzler, "Scarf: Self-supervised contrastive learning using random feature corruption," in ICLR, 2022. 2, 9, 14 +[48] J. Yoon, Y. Zhang, J. Jordon, and M. van der Schaar, "VIME: extending the success of self- and semi-supervised learning to tabular domain," in NeurIPS, 2020, pp. 11.033-11.043. 2, 9, 13, 14 +[49] J. Wu, S. Chen, Q. Zhao, R. Sergazinov, C. Li, S. Liu, C. Zhao, T. Xie, H. Guo, C. Ji, D. Cociorva, and H. Brunzell, "Switchtab: Switched autoencoders are effective tabular learners," in AAAI, 2024, pp. 15924-15933. 2, 7, 9, 13 +[50] A. Kadra, M. Lindauer, F. Hutter, and J. Grabocka, "Well-tuned simple nets excel on tabular datasets," in NeurIPS, 2021, pp. 23928-23941. 2, 4, 6, 9, 10, 12 +[51] R. Wang, B. Fu, G. Fu, and M. Wang, "Deep & cross network for ad click predictions," in ADKDD, 2017, pp. 1-7. 2, 7 +[52] G. Klambauer, T. Unterthiner, A. Mayr, and S. Hochreiter, "Self-normalizing neural networks," in NIPS, 2017, pp. 971-980. 2, 9, 12 +[53] G. Ke, J. Zhang, Z. Xu, J. Bian, and T.-Y. Liu, "Tabnn: A universal neural network solution for tabular data," 2018. 2 +[54] R. Wang, R. Shivanna, D. Z. Cheng, S. Jain, D. Lin, L. Hong, and E. H. Chi, "DCN V2: improved deep & cross network and practical lessons for web-scale learning to rank systems," in WWW, 2021, pp. 1785-1797. 2, 7, 9, 12 +[55] J. Chen, K. Liao, Y. Wan, D. Z. Chen, and J. Wu, "Danets: Deep abstract networks for tabular data classification and regression," in AAAI, 2022, pp. 3930-3938. 2, 9, 13 +[56] J. Chen, K. Liao, Y. Fang, D. Chen, and J. Wu, "Tabcaps: A capsule neural network for tabular data classification with bow routing," in ICLR, 2023. 2 +[57] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. 
Wu, "Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's," in KDD, 2024, pp. 3679-3689. 2
+[58] C. Xu, Y.-C. Huang, J. Y.-C. Hu, W. Li, A. Gilani, H.-S. Goan, and H. Liu, "Bishop: Bi-directional cellular learning for tabular data with generalized sparse modern hopfield model," in ICML, 2024, pp. 55048-55075. 2, 7, 9, 12 +

[59] S. Badirli, X. Liu, Z. Xing, A. Bhowmik, and S. S. Keerthi, "Gradient boosting neural networks: Grownet," CoRR, vol. abs/2002.07971, 2020. 2, 7, 8, 9, 12, 19
+[60] S. Popov, S. Morozov, and A. Babenko, “Neural oblivious decision ensembles for deep learning on tabular data,” in ICLR, 2020. 2, 8, 9, 12, 19, 20
+[61] C.-H. Chang, R. Caruana, and A. Goldenberg, "NODE-GAM: neural generalized additive model for interpretable deep learning," in ICLR, 2022. 2, 3, 8, 9, 12, 20
+[62] W. Song, C. Shi, Z. Xiao, Z. Duan, Y. Xu, M. Zhang, and J. Tang, "Autoint: Automatic feature interaction learning via self-attentive neural networks," in CIKM, 2019, pp. 1161-1170. 3, 7, 9, 11, 13
+[63] X. Huang, A. Khetan, M. Cvitkovic, and Z. S. Karnin, "TabTransformer: Tabular data modeling using contextual embeddings," CoRR, vol. abs/2012.06678, 2020. 3, 7, 8, 9, 11, 13, 14, 19, 20
+[64] Q.-L. Zhou, H.-J. Ye, L. Wang, and D.-C. Zhan, "Unlocking the transferability of tokens in deep models for tabular data," CoRR, vol. abs/2310.15149, 2023. 3, 9, 13, 15
+[65] J. Chen, J. Yan, Q. Chen, D. Z. Chen, J. Wu, and J. Sun, "Can a deep learning model be a sure bet for tabular prediction?" in KDD, 2024, pp. 288-296. 3, 7, 8, 9, 12, 13
+[66] A. Jeffares, T. Liu, J. Crabbé, F. Imrie, and M. van der Schaar, "Tangos: Regularizing tabular neural networks through gradient orthogonalization and specialization," in ICLR, 2023. 3, 9, 10
+[67] H. Ye, W. Fan, X. Song, S. Zheng, H. Zhao, D. dan Guo, and Y. Chang, "Ptarl: Prototype-based tabular representation learning via space calibration," in ICLR, 2024. 3, 9, 10
+[68] Y. Nader, L. 
Sixt, and T. Landgraf, "DNNR: differential nearest neighbors regression," in ICML, 2022, pp. 16296-16317. 3, 7, 9, 10
+[69] Y. Gorishniy, I. Rubachev, N. Kartashev, D. Shlenskii, A. Kotelnikov, and A. Babenko, "Tabr: Tabular deep learning meets nearest neighbors in 2023," in ICLR, 2024. 3, 6, 7, 9, 10, 19
+[70] G. Somepalli, A. Schwarzschild, M. Goldblum, C. B. Bruss, and T. Goldstein, "SAINT: Improved neural networks for tabular data via row attention and contrastive pre-training," in NeurIPS Workshop, 2022. 3, 7, 9, 10, 11, 13, 14
+[71] I. Rubachev, A. Alekberov, Y. Gorishniy, and A. Babenko, "Revisiting pretraining objectives for tabular deep learning," CoRR, vol. abs/2207.03208, 2022. 3, 7, 13, 14
+[72] S. Onishi, K. Oono, and K. Hayashi, "Tabret: Pre-training transformer-based tabular models for unseen columns," CoRR, vol. abs/2303.15747, 2023. 3, 9, 12, 15
+[73] J. Shen, L. Li, L. M. Dery, C. Staten, M. Khodak, G. Neubig, and A. Talwalkar, "Cross-modal fine-tuning: Align then refine," in ICML, 2023, pp. 31030-31056. 3, 9, 13, 15
+[74] Y. Zhu, T. Brettin, F. Xia, A. Partin, M. Shukla, H. Yoo, Y. A. Evrard, J. H. Doroshow, and R. L. Stevens, "Converting tabular data into images for deep learning with convolutional neural networks," Scientific Reports, vol. 11, no. 11325, 2021. 3, 4, 9, 17
+[75] S. Lee and S.-C. Lee, "Tableye: Seeing small tables through the lens of images," CoRR, vol. abs/2307.02491, 2023. 3, 9, 17
+[76] A. Mamdouh, M. El-Melegy, S. Ali, and R. Kikinis, "Tab2visual: Overcoming limited data in tabular data classification using deep learning with visual representations," CoRR, vol. abs/2502.07181, 2025. 3, 9, 17
+[77] Z. Wang and J. Sun, "Transtab: Learning transferable tabular transformers across tables," in NeurIPS, 2022, pp. 2902-2915. 3, 9, 13, 16
+[78] J. Yan, B. Zheng, H. Xu, Y. Zhu, D. Z. Chen, J. Sun, J. Wu, and J. Chen, "Making pre-trained language models great on tabular prediction," in ICLR, 2024. 3, 6, 9, 16, 17
+[79] C. Ye, G. Lu, H. 
Wang, L. Li, S. Wu, G. Chen, and J. Zhao, "Towards cross-table masked pretraining for web data mining," in WWW, 2024, pp. 4449-4459. 3, 6, 9, 16 +[80] S. Hegselmann, A. Buendia, H. Lang, M. Agrawal, X. Jiang, and D. Sontag, "Tabllm: few-shot classification of tabular data with large language models," in AISTATS, 2023, pp. 5549-5581. 3, 9, 13, 16, 17 +[81] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, "From supervised to generative: A novel paradigm for tabular deep learning with large language models," in SIGKDD, 2024, pp. 3323-3333. 3, 6 +[82] N. Hollmann, S. Müller, and F. Hutter, "Large language models for automated data science: Introducing CAAFE for context-aware automated feature engineering," in NeurIPS, 2023, pp. 44753-44775. 3, 9 + +[83] S. Han, J. Yoon, S. Ö. Arik, and T. Pfister, "Large language models can automatically engineer features for few-shot tabular learning," in ICML, 2024, pp. 17454-17479. 3, 9 +[84] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., "A comprehensive survey on pretrained foundation models: A history from bert to chatgpt," International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024. 3, 17 +[85] Y. Liang, H. Wen, Y. Nie, Y. Jiang, M. Jin, D. Song, S. Pan, and Q. Wen, "Foundation models for time series analysis: A tutorial and survey," in SIGKDD, 2024, pp. 6555-6565. 3 +[86] H.-J. Ye, Q.-L. Zhou, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, "Rethinking pre-training in tabular data: A neighborhood embedding perspective," CoRR, vol. abs/2311.00055, 2025. 3, 9, 18 +[87] D. Bonet, D. M. Montserrat, X. G. i Nieto, and A. G. Ioannidis, "Hyperfast: Instant classification for tabular data," in AAAI, 2024, pp. 11 114-11 123. 3, 7, 9, 18 +[88] A. Müller, C. Curino, and R. Ramakrishnan, "Mothernet: Fast training and inference via hyper-network transformers," in ICLR, 2025. 3, 8, 9, 18 +[89] N. Hollmann, S. Müller, K. Eggensperger, and F. 
Hutter, "Tabpfn: A transformer that solves small tabular classification problems in a second," in ICLR, 2023. 3, 6, 7, 8, 9, 10, 16, 18, 19 +[90] V. Thomas, J. Ma, R. Hosseinzadeh, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, "Retrieval & fine-tuning for in-context tabular models," in NeurIPS, 2024, pp. 108439-108467. 3, 10, 19 +[91] N. Hollmann, S. Müller, L. Purucker, A. Krishnakumar, M. Körfer, S. B. Hoo, R. T. Schirrmeister, and F. Hutter, "Accurate predictions on small data with a tabular foundation model," Nature, vol. 637, no. 8045, pp. 319-326, 2025. 3, 9, 10, 18, 19, 20 +[92] J. Gardner, J. C. Perdomo, and L. Schmidt, "Large scale transfer learning for tabular data via language modeling," in NeurIPS, 2024, pp. 45155-45205. 3, 6, 9, 19 +[93] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, "From supervised to generative: A novel paradigm for tabular deep learning with large language models," in SIGKDD, 2024, pp. 3323-3333. 3, 9, 17, 19 +[94] X. Wen, S. Zheng, Z. Xu, Y. Sun, and J. Bian, "Scalable in-context learning on tabular data via retrieval-augmented large language models," CoRR, vol. abs/2502.03147, 2025. 3, 9, 19 +[95] Y. Gorishniy, A. Kotelnikov, and A. Babenko, "Tabm: Advancing tabular deep learning with parameter-efficient ensembling," CoRR, vol. abs/2410.24210, 2024. 3, 20, 21 +[96] S.-Y. Liu and H.-J. Ye, "Tabpfn unleashed: A scalable and effective solution to tabular classification problems," CoRR, vol. abs/2502.02527, 2025. 3, 17, 19, 20 +[97] J. Svirsky and O. Lindenbaum, "Interpretable deep clustering for tabular data," in ICML, 2024, pp. 47314-47330. 3, 20 +[98] H. T. Rauf, A. Freitas, and N. W. Paton, "Tabledc: Deep clustering for tabular data," CoRR, vol. abs/2405.17723, 2024. 3, 20 +[99] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, "Adbench: Anomaly detection benchmark," in NeurIPS, 2022, pp. 32142-32159. 3, 20 +[100] T. Shenkar and L. Wolf, "Anomaly detection for tabular data with internal contrastive learning," in ICLR, 2022. 
3 +[101] J. Yin, Y. Qiao, Z. Zhou, X. Wang, and J. Yang, "MCM: masked cell modeling for anomaly detection in tabular data," in ICLR, 2024. 3 +[102] L. Hansen, N. Seedat, M. van der Schaar, and A. Petrovic, "Reimagining synthetic tabular data generation through data-centric AI: A comprehensive benchmark," in NeurIPS, 2023, pp. 33781-33823. 3 +[103] C. Hou, S. Gu, C. Xu, and Y. Qian, "Incremental learning for simultaneous augmentation of feature and class," IEEE Transactions on pattern analysis and machine intelligence, vol. 45, no. 12, pp. 14789-14806, 2023. 3 +[104] M. Vero, M. Balunovic, and M. T. Vechev, "Cuts: Customizable tabular synthetic data generation," in ICML, 2024, pp. 49408-49433. 3 +[105] S. Ö. Arik and T. Pfister, "Tabnet: Attentive interpretable tabular learning," in AAAI, 2021, pp. 6679-6687. 3, 7, 8, 9, 12, 20 +[106] P. Hager, M. J. Menten, and D. Rueckert, "Best of both worlds: Multimodal contrastive learning with tabular and imaging data," in CVPR, 2023, pp. 23924-23935. 3, 7, 21 +[107] J.-P. Jiang, H.-J. Ye, L. Wang, Y. Yang, Y. Jiang, and D.-C. Zhan, "Tabular insights, visual impacts: Transferring expertise from tables to images," in ICML, 2024, pp. 21988-22009. 3, 7, 21 + +[108] Y. Diao, Y. Yang, Q. Li, B. He, and M. Lu, "Oebench: Investigating open environment challenges in real-world relational data streams," VLDB, vol. 17, no. 6, pp. 1283-1296, 2024. 3 +[109] I. Rubachev, N. Kartashev, Y. Gorishniy, and A. Babenko, "Tabred: A benchmark of tabular machine learning in-the-wild," CoRR, vol. abs/2406.19380, 2024. 3, 6, 8, 20, 21 +[110] J. Gardner, Z. Popovic, and L. Schmidt, "Benchmarking distribution shift in tabular data with tableshift," in NeurIPS, 2024, pp. 53385-53432. 3, 20, 21 +[111] Z.-H. Zhou, "Learnability with time-sharing computational resource concerns," National Science Review, vol. 11, no. 10, p. nwae204, 2024. 3 +[112] N. Jin, J. Siebert, D. Li, and Q. 
Chen, "A survey on table question answering: recent advances," in CCKS, 2022, pp. 174-186. 3, 21 +[113] X. Fang, W. Xu, F. A. Tan, J. Zhang, Z. Hu, Y. Qi, S. Nickleach, D. Socolinsky, S. Sengamedu, and C. Faloutsos, "Large language models (llms) on tabular data: Prediction, generation, and understanding-a survey," CoRR, vol. abs/2402.17944, 2024. 3, 21 +[114] C. Winship and R. D. Mare, "Regression models with ordinal variables," American sociological review, vol. 49, no. 4, pp. 512-525, 1984. 3 +[115] P. A. Gutierrez, M. Perez-Ortiz, J. Sánchez-Monedero, F. Fernández-Navarro, and C. Hervás-Martínez, "Ordinal regression methods: Survey and experimental study," IEEE Trans. Knowl. Data Eng., vol. 28, no. 1, pp. 127-146, 2016. 3 +[116] A. Jeffares, A. Curth, and M. van der Schaar, "Deep learning through A telescoping lens: A simple model provides empirical insights on grokking, gradient boosting & beyond," in NeurIPS, 2024, pp. 123-498-123-533. 4 +[117] G. Cormode, P. Indyk, N. Koudas, and S. Muthukrishnan, "Fast mining of massive tabular data via approximate distance computations," in ICDE, 2002, pp. 605-614. 4 +[118] M. D. Adelfio and H. Samet, "Schema extraction for tabular data on the web," VLDB, vol. 6, no. 6, pp. 421-432, 2013. 4 +[119] J. F. Arias, A. K. Chhabra, and V. Misra, "Efficient interpretation of tabular documents," in ICPR, 1996, pp. 681-685. 4 +[120] H.-L. Wang, S.-H. Wu, K. K. Wang, C.-L. Sung, W.-L. Hsu, and W.-K. Shih, "Semantic search on internet tabular information extraction for answering queries," in CIKM, 2000, pp. 243-249. 4 +[121] M.-J. Nederhof, "An optimal tabular parsing algorithm," in ACL, 1994, pp. 117-124. 4 +[122] J. F. Arias, A. K. Chhabra, and V. Misra, "Interpreting and representing tabular documents," in CVPR, 1996, pp. 600-605. 4 +[123] G. Richards and V. J. Rayward-Smith, "Discovery of association rules in tabular data," in ICDM, 2001, pp. 465-472. 4 +[124] J. R. Quinlan, "Induction of decision trees," Machine learning, vol. 
1, pp. 81-106, 1986. 4
+[125] L. Breiman, J. Friedman, R. Olshen, and C. J. Stone, Classification and Regression Trees. Chapman and Hall/CRC, 1984. 4
+[126] Y. Freund and R. E. Schapire, “A decision-theoretic generalization of on-line learning and an application to boosting,” in EuroCOLT, 1995, pp. 23-37. 4, 19
+[127] L. Breiman, "Random forests," Machine Learning, vol. 45, no. 1, pp. 5-32, 2001. 4, 19
+[128] J. H. Friedman, "Greedy function approximation: a gradient boosting machine," Annals of statistics, pp. 1189-1232, 2001. 4
+[129] ——, "Stochastic gradient boosting," Computational statistics & data analysis, vol. 38, no. 4, pp. 367-378, 2002. 4
+[130] T. Chen and C. Guestrin, "Xgboost: A scalable tree boosting system," in KDD, 2016, pp. 785-794. 4, 8, 18, 20
+[131] G. Ke, Q. Meng, T. Finley, T. Wang, W. Chen, W. Ma, Q. Ye, and T.-Y. Liu, "Lightgbm: A highly efficient gradient boosting decision tree," in NIPS, 2017, pp. 3146-3154. 4, 8, 20
+[132] L. O. Prokhorenkova, G. Gusev, A. Vorobev, A. V. Dorogush, and A. Gulin, "Catboost: unbiased boosting with categorical features," in NeurIPS, 2018, pp. 6639-6649. 4, 8
+[133] D. Nielsen, "Tree boosting with xgboost-why does xgboost win "every" machine learning competition?" Master's thesis, NTNU, 2016. 4
+[134] S. Makridakis, E. Spiliotis, and V. Assimakopoulos, "M5 accuracy competition: Results, findings, and conclusions," International Journal of Forecasting, vol. 38, no. 4, pp. 1346-1364, 2022. 4
+[135] H. Larochelle, D. Erhan, A. Courville, J. Bergstra, and Y. Bengio, "An empirical evaluation of deep architectures on problems with many factors of variation," in ICML, 2007, pp. 473-480. 4 +

[136] R. Salakhutdinov and G. Hinton, "Learning a nonlinear embedding by preserving class neighbourhood structure," in AISTATS, 2007, pp. 412-419. 4
+[137] R. Min, D. A. Stanley, Z. Yuan, A. Bonner, and Z. Zhang, “A deep non-linear feature mapping for large-margin knn classification,” in ICDM, 2009, pp. 357-366. 4
+[138] M. 
Ahmed, A. N. Mahmood, and J. Hu, "A survey of network anomaly detection techniques," Journal of Network and Computer Applications, vol. 60, pp. 19-31, 2016. 4 +[139] L. Lu, M. Medo, C. H. Yeung, Y.-C. Zhang, Z.-K. Zhang, and T. Zhou, "Recommender systems," Physics reports, vol. 519, no. 1, pp. 1-49, 2012. 4 +[140] D. Salinas, V. Flunkert, J. Gasthaus, and T. Januschowski, "Deepar: Probabilistic forecasting with autoregressive recurrent networks," International journal of forecasting, vol. 36, no. 3, pp. 1181-1191, 2020. 4 +[141] T.-J. Huang, X.-Y. Chen, and H.-J. Ye, "Seqfusion: Sequential fusion of pre-trained models for zero-shot time-series forecasting," CoRR, vol. abs/2503.02836, 2025. 4 +[142] Q. Liu, F. Yu, S. Wu, and L. Wang, "A convolutional click prediction model," in CIKM, 2015, pp. 1743-1746. 4 +[143] H. Guo, R. Tang, Y. Ye, Z. Li, and X. He, "Deepfm: A factorization-machine based neural network for CTR prediction," in IJCAI, 2017, pp. 1725-1731. 4 +[144] S. Somvanshi, S. Das, S. A. Javed, G. Antariksa, and A. Hossain, "A survey on deep tabular learning," CoRR, vol. abs/2410.12034, 2024. 4 +[145] D. Lane, D. Scott, M. Hebl, R. Guerra, D. Osherson, and H. Zimmer, Introduction to statistics. CiteSeer, 2003. 4 +[146] A. F. Karr, A. P. Sanil, and D. L. Banks, "Data quality: A statistical perspective," Statistical Methodology, vol. 3, no. 2, pp. 137-173, 2006. 4 +[147] A. Sánchez-Morales, J.-L. Sancho-Gómez, J.-A. Martínez-García, and A. R. Figueiras-Vidal, "Improving deep learning performance with missing values via deletion and compensation," Neural Computing and Applications, vol. 32, pp. 13233-13244, 2020. 4 +[148] D. Chicco, L. Oneto, and E. Tavazzi, "Eleven quick tips for data cleaning and feature engineering," PLOS Computational Biology, vol. 18, no. 12, p. e1010718, 2022. 4 +[149] Y. Luo, M. Wang, H. Zhou, Q. Yao, W.-W. Tu, Y. Chen, W. Dai, and Q. 
Yang, "Autocross: Automatic feature crossing for tabular data in real-world applications," in KDD, 2019, pp. 1936-1945. 4 +[150] H. He and E. A. Garcia, "Learning from imbalanced data," IEEE Transactions on knowledge and data engineering, vol. 21, no. 9, pp. 1263-1284, 2009. 5 +[151] H. He and Y. Ma, Imbalanced learning: foundations, algorithms, and applications. John Wiley & Sons, 2013. 5 +[152] T. Lin, P. Goyal, R. B. Girshick, K. He, and P. Dollar, "Focal loss for dense object detection," in ICCV, 2017, pp. 2999-3007. 5 +[153] J. M. Johnson and T. M. Khoshgoftaar, "Survey on deep learning with class imbalance," Journal of big data, vol. 6, no. 1, pp. 1-54, 2019. 5 +[154] J. Engelmann and S. Lessmann, "Conditional Wasserstein gan-based oversampling of tabular data for imbalanced learning," Expert Systems with Applications, vol. 174, p. 114582, 2021. 5 +[155] R. Sauber-Cole and T. M. Khoshgoftaar, "The use of generative adversarial networks to alleviate class imbalance in tabular data: a survey," Journal of Big Data, vol. 9, no. 1, p. 98, 2022. 5, 21 +[156] X.-Y. Liu, J. Wu, and Z.-H. Zhou, "Exploratory undersampling for class-imbalance learning," IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550, 2008. 5 +[157] N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer, "SMOTE: synthetic minority over-sampling technique," Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002. 5 +[158] A. Fernández, S. García, F. Herrera, and N. V. Chawla, "SMOTE for learning from imbalanced data: Progress and challenges, marking the 15-year anniversary," Journal of Artificial Intelligence Research, vol. 61, pp. 863-905, 2018. 5 +[159] K. Cao, C. Wei, A. Gaidon, N. Arechiga, and T. Ma, "Learning imbalanced datasets with label-distribution-aware margin loss," in NeurIPS, 2019, pp. 1567-1578. 5 +[160] Y. Cui, M. Jia, T.-Y. Lin, Y. Song, and S. 
Belongie, "Class-balanced loss based on effective number of samples," in CVPR, 2019, pp. 9268-9277. 5 +[161] Y. Xie, Z. Wang, Y. Li, B. Ding, N. M. Gurel, C. Zhang, M. Huang, W. Lin, and J. Zhou, "Fives: Feature interaction via edge search for large-scale tabular data," in SIGKDD, 2021, pp. 3795-3805. 5 + +[162] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, "Annotatedtables: A large tabular dataset with language model annotations," CoRR, vol. abs/2406.16349, 2024. 5 +[163] A. Klein and F. Hutter, "Tabular benchmarks for joint architecture and hyperparameter optimization," CoRR, vol. abs/1905.04970, 2019. 5 +[164] P. Pokhrel, "A comparison of automl hyperparameter optimization tools for tabular data," Ph.D. dissertation, Youngstown State University, 2023. 5 +[165] F. Hutter, L. Kotthoff, and J. Vanschoren, Automated machine learning: methods, systems, challenges. Springer Nature, 2019. 5 +[166] X. He, K. Zhao, and X. Chu, "Automl: A survey of the state-of-the-art," Knowledge-based systems, vol. 212, p. 106622, 2021. 5 +[167] M. Feurer, K. Eggensperger, S. Falkner, M. Lindauer, and F. Hutter, "Auto-sklearn 2.0: Hands-free automl via meta-learning," Journal of Machine Learning Research, vol. 23, no. 261, pp. 1-61, 2022. 5 +[168] C. Mennella, U. Maniscalco, G. De Pietro, and M. Esposito, "Ethical and regulatory challenges of ai technologies in healthcare: A narrative review," Heliyon, vol. 10, no. 4, 2024. 5 +[169] W. Moore and S. Frye, "Review of hipaa, part 1: history, protected health information, and privacy and security rules," Journal of nuclear medicine technology, vol. 47, no. 4, pp. 269-272, 2019. 5 +[170] D. F. Sittig and H. Singh, "Legal, ethical, and financial dilemmas in electronic health record adoption and use," Pediatrics, vol. 127, no. 4, pp. e1042-e1047, 2011. 5 +[171] J. Amann, A. Blasimme, E. Vayena, D. Frey, V. I. Madai, and P. 
Consortium, "Explainability for artificial intelligence in healthcare: a multidisciplinary perspective," BMC medical informatics and decision making, vol. 20, pp. 1-9, 2020. 5 +[172] B. S. Caffo, F. A. D'Asaro, A. Garcez, and E. Raffinetti, "Explainable artificial intelligence models and methods in finance and healthcare," p. 970246, 2022. 5 +[173] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger, "On calibration of modern neural networks," in ICML, 2017, pp. 1321-1330. 5 +[174] K. Helli, D. Schnurr, N. Hollmann, S. Müller, and F. Hutter, "Drift-resilient tabpfn: In-context learning temporal distribution shifts on tabular data," in NeurIPS, 2024, pp. 98742-98781. 5, 21 +[175] J. Demsr, "Statistical comparisons of classifiers over multiple data sets," Journal of Machine Learning Research, vol. 7, pp. 1-30, 2006. 5 +[176] Y. Gorishniy, A. Kotelnikov, and A. Babenko, "Tabm: Advancing tabular deep learning with parameter-efficient ensembling," in ICLR, 2025. 5, 19 +[177] M. E. Glickman and A. C. Jones, "Rating the chess rating system," CHANCE-BERLIN THEN NEW YORK-, vol. 12, pp. 21-28, 1999. 5 +[178] L. M. Hvattum and H. Arntzen, "Using elo ratings for match result prediction in association football," International Journal of forecasting, vol. 26, no. 3, pp. 460-470, 2010. 5 +[179] J. Ma, V. Thomas, R. Hosseinzadeh, H. Kamkari, A. Labach, J. C. Cresswell, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, "Tabdpt: Scaling tabular foundation models," CoRR, vol. abs/2410.18164, 2024. 6, 18, 19 +[180] A. Tschalzev, L. Purucker, S. Lüdtke, F. Hutter, C. Bartelt, and H. Stuckenschmidt, "Unreflected use of tabular data repositories can undermine research quality," in ICLR Workshop, 2025. 6, 7 +[181] S. B. Rabbani, I. V. Medri, and M. D. Samad, "Attention versus contrastive learning of tabular data - A data-centric benchmarking," CoRR, vol. abs/2401.04266, 2024. 6 +[182] Y. Yang, Y. Wang, G. Liu, L. Wu, and Q. 
Liu, "Unitabe: A universal pretraining protocol for tabular foundation model in data science," in ICLR, 2024. 6, 9, 16 +[183] G. Eggert, K. Huo, M. Biven, and J. Waugh, "Tablib: A dataset of 627m tables with context," CoRR, vol. abs/2310.07875, 2023. 6 +[184] H. W. Jian Yang, Xuefeng Li, "DeepTables: A Deep Learning Python Package for Tabular Data," https://github.com/DataCanvasIO/DeepTables, 2022, version 0.2.x.6 +[185] N. Erickson, J. Mueller, A. Shirkov, H. Zhang, P. Larroy, M. Li, and A. Smola, "Autogluon-tabular: Robust and accurate automl for structured data," CoRR, vol. abs/2003.06505, 2020. 6 +[186] M. Joseph, "Pytorch tabular: A framework for deep learning with tabular data," CoRR, vol. abs/2104.13638, 2021. 6 +[187] J. R. Zaurin and P. Mulinka, "pytorch-widedeep: A flexible package for multimodal deep learning," Journal of Open Source Software, vol. 8, no. 86, p. 5027, Jun. 2023. 6 + +[188] S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and H.-J. Ye, "TALENT: A tabular analytics and learning toolbox," CoRR, vol. abs/2407.04057, 2024. 6 +[189] T. Akiba, S. Sano, T. Yanase, T. Ohta, and M. Koyama, "Optuna: A next-generation hyperparameter optimization framework," in KDD, 2019, pp. 2623-2631. 6 +[190] N. Morgan and H. Bourlard, "Generalization and parameter estimation in feedforward nets: Some experiments," in NeuIPS, 1989, pp. 630-637. 7 +[191] S. Arlot and A. Celisse, "A survey of cross-validation procedures for model selection," CoRR, vol. abs/0907.4728, 2009. 7 +[192] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, T.-W. Chen, and T.-H. Chang, "Prompt: Towards a better deep neural network for tabular data," in ICML, 2023, pp. 4392-4434. 7, 9, 10 +[193] S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, "GRANDE: gradient-based decision tree ensembles for tabular data," in ICLR, 2024. 7, 8, 9, 12, 19 +[194] X. Jiang, A. Margeloiu, N. Simidjievski, and M. 
Jamnik, "Protogate: Prototype-based neural networks with global-to-local feature selection for tabular biomedical data," in ICML, 2024, pp. 21844-21878. 7 +[195] G. C. Cawley and N. L. C. Talbot, "On over-fitting in model selection and subsequent selection bias in performance evaluation," Journal of Machine Learning Research, vol. 11, pp. 2079-2107, 2010. 7 +[196] T. G. Dietterich, "Approximate statistical tests for comparing supervised classification learning algorithms," Neural Computation, vol. 10, no. 7, pp. 1895-1923, 1998. 7 +[197] S. Raschka, "Model evaluation, model selection, and algorithm selection in machine learning," CoRR, vol. abs/1811.12808, 2018. 7 +[198] H. Schulz-Kumpel, S. Fischer, T. Nagler, A. Boulesteix, B. Bischl, and R. Hornung, "Constructing confidence intervals for 'the' generalization error - a comprehensive benchmark study," CoRR, vol. abs/2409.18836, 2024. 7 +[199] T. Nagler, L. Schneider, B. Bischl, and M. Feurer, "Reshuffling resampling splits can improve generalization of hyperparameter optimization," in NeurIPS, 2024. 7 +[200] J. Feng, Y. Yu, and Z. Zhou, "Multi-layered gradient boosting decision trees," in NeurIPS, 2018, pp. 3555-3565. 7 +[201] I. Padhi, Y. Schiff, I. Melnyk, M. Rigotti, Y. Mroueh, P. Dognin, J. Ross, R. Nair, and E. Altman, "Tabular transformers for modeling multivariate time series," in ICASSP, 2021, pp. 3565-3569. 7 +[202] F. Di Martino and F. Delmastro, "Explainable ai for clinical and remote health applications: a survey on tabular and time series data," Artificial Intelligence Review, vol. 56, no. 6, pp. 5261-5315, 2023. 7 +[203] G. M. Van de Ven, T. Tuytelaars, and A. S. Tolias, "Three types of incremental learning," Nature Machine Intelligence, vol. 4, no. 12, pp. 1185-1197, 2022. 7 +[204] D.-W. Zhou, Q.-W. Wang, Z.-H. Qi, H.-J. Ye, D.-C. Zhan, and Z. Liu, "Class-incremental learning: A survey," IEEE transactions on pattern analysis and machine intelligence, vol. 46, no. 12, pp. 9851-9873, 2024. 7 +[205] J. 
Yosinski, J. Clune, Y. Bengio, and H. Lipson, "How transferable are features in deep neural networks?" in NIPS, vol. 27, 2014. 7 +[206] S. U. H. Dar, M. Özbey, A. B. Çatlı, and T. Çukur, "A transfer-learning approach for accelerated mri using deep neural networks," Magnetic resonance in medicine, vol. 84, no. 2, pp. 663-685, 2020. 7 +[207] Y. Cao, Z. Fang, Y. Wu, D.-X. Zhou, and Q. Gu, "Towards understanding the spectral bias of deep learning," CoRR, vol. abs/1912.01198, 2019. 7 +[208] R. Basri, M. Galun, A. Geifman, D. Jacobs, Y. Kasten, and S. Kritchman, "Frequency bias in neural networks for input of non-uniform density," in ICML, 2020, pp. 685-694. 7 +[209] F. Matteucci, V. Arzamasov, and K. Böhm, "A benchmark of categorical encoders for binary classification," in NeurIPS, 2023, pp. 54855-54875. 8 +[210] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, "Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's," in SIGKDD, 2024, pp. 3679-3689. 8 +[211] M. Pang, K. M. Ting, P. Zhao, and Z. Zhou, "Improving deep forest by screening," IEEE Transactions on Knowledge and Data Engineering., vol. 34, no. 9, pp. 4298-4312, 2022. 8 +[212] M. T. Ribeiro, S. Singh, and C. Guestrin, "why should I trust you?: Explaining the predictions of any classifier," in KDD, 2016, pp. 1135-1144. 8 + +[213] S. M. Lundberg and S. Lee, “A unified approach to interpreting model predictions,” in NIPS, 2017, pp. 4765-4774. 8 +[214] Z.-H. Zhou and J. Feng, "Deep forest," National science review, vol. 6, no. 1, pp. 74-86, 2019. 8 +[215] Y. Cheng, R. Hu, H. Ying, X. Shi, J. Wu, and W. Lin, "Arithmetic feature interaction is necessary for deep tabular learning," in AAAI, 2024, pp. 11516-11524. 9, 12, 13 +[216] J. Kossen, N. Band, C. Lyle, A. N. Gomez, T. Rainforth, and Y. Gal, "Self-attention between datapoints: Going beyond individual input-output pairs in deep learning," in NeurIPS, 2021, pp. 28742-28756. 9 +[217] B. Schäfl, L. Gruber, A. 
Bitto-Nemling, and S. Hochreiter, "Hop- ular: Modern hopfield networks for tabular data," CoRR, vol. abs/2206.00664, 2022. 9, 10 +[218] H. Kim, A. Mnih, J. Schwarz, M. Garnelo, S. M. A. Eslami, D. Rosenbaum, O. Vinyals, and Y. W. Teh, "Attentive neural processes," in ICLR, 2019. 9, 10 +[219] I. Shavitt and E. Segal, "Regularization learning networks: deep learning for tabular datasets," in NeurIPS, 2018, pp. 1386-1396. 9, 10 +[220] V. Verma, T. Luong, K. Kawaguchi, H. Pham, and Q. V. Le, "Towards domain-agnostic contrastive learning," in ICML, 2021, pp. 10530-10541. 9, 14 +[221] C. Lee, F. Imrie, and M. van der Schaar, "Self-supervision enhanced feature selection with correlated gates," in ICLR, 2022. 9, 14 +[222] R. Levin, V. Cherepanova, A. Schwarzschild, A. Bansal, C. B. Bruss, T. Goldstein, A. G. Wilson, and M. Goldblum, "Transfer learning with deep tabular models," in ICLR, 2023. 9, 13, 14, 15 +[223] K. Majmundar, S. Goyal, P. Netrapalli, and P. Jain, "MET: masked encoding for tabular data," CoRR, vol. abs/2206.08564, 2022. 9, 14 +[224] E. Hajiramezanali, N. L. Diamant, G. Scalia, and M. W. Shen, "Stab: Self-supervised learning for tabular data," in NeurIPS Workshop, 2022. 9, 14 +[225] S. Chen, J. Wu, N. Hovakimyan, and H. Yao, "Recontab: Regularized contrastive representation learning for tabular data," CoRR, vol. abs/2310.18541, 2023. 9, 14 +[226] W.-W. Du, W.-Y. Wang, and W.-C. Peng, "Dora: Domain-based self-supervised learning framework for low-resource real estate appraisal," in CIKM, 2023, pp. 4552-4558. 9, 14 +[227] Y. Sui, T. Wu, J. C. Cresswell, G. Wu, G. Stein, X. S. Huang, X. Zhang, and M. Volkovs, "Self-supervised representation learning from random data projectors," in ICLR, 2024. 9, 14 +[228] T. Iwata and A. Kumagai, "Meta-learning from tasks with heterogeneous attribute spaces," in NeurIPS, 2020, pp. 6053-6063. 9, 13, 15 +[229] L. Liu, M. M. Fard, and S. 
Zhao, "Distribution embedding networks for generalization from a diverse set of classification tasks," Transactions on Machine Learning Research, 2022. 9, 15 +[230] B. Zhu, X. Shi, N. Erickson, M. Li, G. Karypis, and M. Shoaran, "Xtab: Cross-table pretraining for tabular transformers," in ICML, 2023, pp. 43181-43204. 9, 12, 13, 15 +[231] Y. Zhang, K. Gong, K. Zhang, H. Li, Y. Qiao, W. Ouyang, and X. Yue, "Meta-transformer: A unified framework for multimodal learning," CoRR, vol. abs/2307.10802, 2023. 9, 15 +[232] G. Liu, J. Yang, and L. Wu, "Ptab: Using the pre-trained language model for modeling tabular data," CoRR, vol. abs/2209.08060, 2022. 9, 16 +[233] M. J. Kim, L. Grinsztajn, and G. Varoquaux, "CARTE: pretraining and transfer for tabular learning," in ICML, 2024, pp. 23843-23866. 9, 16, 17 +[234] Z. Cheng, T. Xie, P. Shi, C. Li, R. Nadkarni, Y. Hu, C. Xiong, D. Radev, M. Ostendorf, L. Zettlemoyer, N. A. Smith, and T. Yu, "Binding language models in symbolic languages," in ICLR, 2023. 9, 16 +[235] T. Zhang, S. Wang, S. Yan, L. Jian, and Q. Liu, "Generative table pre-training empowers models for tabular prediction," in EMNLP, 2023. 9, 16 +[236] T. Dinh, Y. Zeng, R. Zhang, Z. Lin, M. Gira, S. Rajput, J. yong Sohn, D. S. Papailiopoulos, and K. Lee, "LIFT: language-interfaced fine-tuning for non-language machine learning tasks," in NeurIPS, 2022, pp. 11763-11784. 9, 16 +[237] R. Wang, Z. Wang, and J. Sun, "Unipredict: Large language models are universal tabular predictors," CoRR, vol. abs/2310.03266, 2023. 9, 16 +[238] A. Sharma, E. Vans, D. Shigemizu, K. A. Boroevich, and T. Tsunoda, "Deepinsight: A methodology to transform a non-image + +data to an image for convolution neural network architecture," Scientific reports, vol. 9, no. 1, p. 11399, 2019. 9, 17 +[239] O. Bazgir, R. Zhang, S. R. Dhruba, R. Rahman, S. Ghosh, and R. 
Pal, "Representation of features as images with neighborhood dependencies for compatibility with convolutional neural networks," Nature communications, vol. 11, no. 1, p. 4391, 2020. 9, 17 +[240] L. Buturovic and D. Miljkovic, "A novel method for classification of tabular data using convolutional neural networks," BioRxiv, pp. 2020-05, 2020. 9, 17 +[241] V. Gómez-Martínez, F. J. Lara-Abelenda, P. Peiro-Corbacho, D. Chushig-Muzo, C. Granja, and C. Soguero-Ruiz, "LM-IGTD: a 2d image generator for low-dimensional and mixed-type tabular data to leverage the potential of convolutional neural networks," CoRR, vol. abs/2406.14566, 2024. 9, 17 +[242] B. Sun, L. Yang, W. Zhang, M. Lin, P. Dong, C. Young, and J. Dong, "Supertml: Two-dimensional word embedding for the precognition on structured tabular data," in CVPR Workshops, 2019. 9, 17 +[243] Z. Wang, C. Gao, C. Xiao, and J. Sun, "Meditab: Scaling medical tabular data predictors via data consolidation, enrichment, and refinement," in *IJCAI*, 2024, pp. 6062-6070. 9, 19 +[244] R. Bommasani, D. A. Hudson, E. Adeli, R. Altman, S. Arora, S. von Arx, M. S. Bernstein, J. Bohg, A. Bosselut, E. Brunskill et al., "On the opportunities and risks of foundation models," CoRR, vol. abs/2108.07258, 2021. 8 +[245] J. Goldberger, G. E. Hinton, S. Roweis, and R. R. Salakhutdinov, "Neighbourhood components analysis," in NIPS, vol. 17, 2004. 10 +[246] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei, "Language models are few-shot learners," in NeurIPS, 2020, pp. 1877-1901. 10, 21 +[247] R. Tibshirani, "Regression shrinkage and selection via the lasso," Journal of the Royal Statistical Society Series B: Statistical Methodology, vol. 
58, no. 1, pp. 267-288, 1996. 10 +[248] A. E. Hoerl and R. W. Kennard, "Ridge regression: Biased estimation for nonorthogonal problems," Technometrics, vol. 12, no. 1, pp. 55-67, 1970. 10 +[249] H. Zou and T. Hastie, “Zou h, hastie t. regularization and variable selection via the elastic net.” Journal of the Royal Statistical Society: Series B (Statistical Methodology), vol. 67, pp. 301–320, 2005. 10 +[250] J. T. Hancock and T. M. Khoshgoftaar, "Survey on categorical data for neural networks," Journal of big data, vol. 7, no. 1, p. 28, 2020. 11 +[251] J. R. Quinlan, C4.5: programs for machine learning. Elsevier, 2014. 12 +[252] L. Breiman, "Random forests," Machine learning, vol. 45, pp. 5-32, 2001. 12 +[253] Z.-H. Zhou and Y. Jiang, "Nec4. 5: Neural ensemble based c4. 5," IEEE Transactions on knowledge and data engineering, vol. 16, no. 6, pp. 770-773, 2004. 12, 14, 20 +[254] T. Hastie and R. Tibshirani, "Generalized additive models," Statistical science, vol. 1, no. 3, pp. 297-310, 1986. 12 +[255] R. Agarwal, L. Melnick, N. Frosst, X. Zhang, B. Lengerich, R. Caruana, and G. E. Hinton, "Neural additive models: Interpretable machine learning with neural nets," in NeurIPS, 2021, pp. 4699-4711. 12, 20 +[256] W.-Y. Wang, W.-W. Du, D. Xu, W. Wang, and W.-C. Peng, "A survey on self-supervised learning for non-sequential tabular data," Machine Learning, vol. 114, no. 1, p. 16, 2025. 13, 14 +[257] G. Hinton, O. Vinyals, and J. Dean, "Distilling the knowledge in a neural network," CoRR, vol. abs/1503.02531, 2015. 14 +[258] S. Yun, D. Han, S. Chun, S. J. Oh, Y. Yoo, and J. Choe, "Cutmix: Regularization strategy to train strong classifiers with localizable features," in ICCV, 2019, pp. 6023-6032. 14 +[259] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, "mixup: Beyond empirical risk minimization," in ICLR, 2018. 14 +[260] C. Hou and Z.-H. 
Zhou, "One-pass learning with incremental and decremental features," IEEE transactions on pattern analysis and machine intelligence, vol. 40, no. 11, pp. 2776-2792, 2017. 15 +[261] H.-J. Ye, D.-C. Zhan, Y. Jiang, and Z.-H. Zhou, "Rectify heterogeneous models with semantic mapping," in ICML, 2018, pp. 5630-5639. 15 +[262] H.-J. Ye, L. Han, and D.-C. Zhan, "Revisiting unsupervised meta-learning via the characteristics of few-shot tasks," IEEE + +Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 3, pp. 3721-3737, 2022. 15 +[263] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, and V. Stoyanov, "Roberta: A robustly optimized bert pretraining approach," CoRR, vol. abs/1907.11692, 2019. 16 +[264] F. Mahdisoltani, J. Biega, and F. M. Suchanek, "YAGO3: A knowledge base from multilingual wikipediais," in CIDR, 2015. 16 +[265] N. Hollmann, S. Müller, and F. Hutter, "Large language models for automated data science: Introducing caafe for context-aware automated feature engineering," in NeurIPS, 2023, pp. 44753-44775. 16 +[266] S. Han, J. Yoon, S. O. Arik, and T. Pfister, "Large language models can automatically engineer features for few-shot tabular learning," in ICML, 2024, pp. 17454-17479. 16 +[267] J. Herzig, P. K. Nowak, T. Müller, F. Piccinno, and J. M. Eisenschlos, "Tapas: Weakly supervised table parsing via pre-training," in ACL, 2020, pp. 4320-4333. 16 +[268] P. Yin, G. Neubig, W. tau Yih, and S. Riedel, "Tabert: Pretraining for joint understanding of textual and tabular data," in ACL, 2020, pp. 8413-8426. 16 +[269] M. Chen, L. Shen, Z. Li, X. J. Wang, J. Sun, and C. Liu, "Visions: Visual masked autoencoders are free-lunch zero-shot time series forecasters," CoRR, vol. abs/2408.17253, 2024. 16 +[270] Z. Li, S. Li, and X. Yan, "Time series as images: Vision transformer for irregularly sampled time series," in NeurIPS, 2023, pp. 49 187-49 204. 16 +[271] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. 
Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dólar, and R. B. Girshick, "Segment anything," in ICCV, 2023, pp. 3992-4003. 17 +[272] D. Ha, A. M. Dai, and Q. V. Le, "Hypernetworks," in ICLR, 2017. 18 +[273] W.-L. Chao, H.-J. Ye, D.-C. Zhan, M. E. Campbell, and K. Q. Weinberger, “Revisiting meta-learning as supervised learning,” CoRR, vol. abs/2002.00573, 2020. 18 +[274] J. Peters, D. Janzing, and B. Scholkopf, Elements of causal inference: foundations and learning algorithms. The MIT Press, 2017. 18 +[275] R. Neal, Bayesian Learning for Neural Networks, ser. Incs. springer, 1996. 18 +[276] S. Müller, N. Hollmann, S. Pineda-Arango, J. Grabocka, and F. Hutter, "Transformers can do bayesian inference," in ICLR, 2022. 18 +[277] H.-J. Ye, S.-Y. Liu, and W.-L. Chao, "A closer look at tabpfn v2: Strength, limitation, and extension," CoRR, vol. abs/2502.17361, 2025. 18 +[278] T. Iwata and A. Kumagai, "Meta-learning of semi-supervised learning from tasks with heterogeneous attribute spaces," CoRR, vol. abs/2311.05088, 2023. 18 +[279] T. Nagler, "Statistical foundations of prior-data fitted networks," in ICML, A. Krause, E. Brunskill, K. Cho, B. Engelhardt, S. Sabato, and J. Scarlett, Eds., 2023, pp. 25660-25676. 18 +[280] J. Ma, A. Dankar, G. Stein, G. Yu, and A. L. Caterini, "Tabpfgen - tabular data generation with tabpfn," CoRR, vol. abs/2406.05216, 2024. 18 +[281] S. Ruiz-Villafranca, J. R. Gómez, J. M. C. Gómez, J. C. Mondéjar, and J. L. Martínez, "A tabpfn-based intrusion detection system for the industrial internet of things," The Journal of Supercomputing, vol. 80, no. 14, pp. 20080-20117, 2024. 18 +[282] A. Margeloiu, A. Bazaga, N. Simidjievski, P. Lio, and M. Jamnik, "Tabmda: Tabular manifold data augmentation for any classifier using transformers with in-context subsetting," CoRR, vol. abs/2406.01805, 2024. 18 +[283] S. B. Hoo, S. Müller, D. Salinas, and F. 
Hutter, "The tabular foundation model tabpfn outperforms specialized time series forecasting models based on simple features," CoRR, vol. abs/2501.02945, 2025. 18 +[284] F. den Breejen, S. Bae, S. Cha, and S.-Y. Yun, "Fine-tuned in-context learning transformers are excellent tabular data classifiers," CoRR, vol. abs/2405.13396v2, 2025. 18, 19 +[285] Y. Wu and D. L. Bergman, "Zero-shot meta-learning for tabular prediction tasks with adversarially pre-trained transformer," CoRR, vol. abs/2502.04573, 2025. 18 +[286] J. Qu, D. Holzmüller, G. Varoquaux, and M. L. Morvan, "Tabicl: A tabular foundation model for in-context learning on large data," CoRR, vol. abs/2502.05564, 2025. 18, 19 + +[287] B. Feuer, C. Hegde, and N. Cohen, "Scaling tabpfn: Sketching and feature selection for tabular prior-data fitted networks," CoRR, vol. abs/2311.10609, 2023. 18 +[288] J. Ma, V. Thomas, G. Yu, and A. L. Caterini, "In-context data distillation with tabpfn," CoRR, vol. abs/2402.06971, 2024. 18 +[289] B. Feuer, R. T. Schirrmeister, V. Cherepanova, C. Hegde, F. Hutter, M. Goldblum, N. Cohen, and C. White, "Tunetables: Context optimization for scalable prior-data fitted networks," in NeurIPS, 2024, pp. 83430-83464. 18, 19 +[290] D. Xu, O. Cirit, R. Asadi, Y. Sun, and W. Wang, "Mixture of in-context prompters for tabular pfns," CoRR, vol. abs/2405.16156, 2024. 19 +[291] M. Koshil, T. Nagler, M. Feurer, and K. Eggensperger, "Towards localization via data embedding for tabPFN," in NeurIPS Workshop, 2024. 19 +[292] Y. Zeng, W. Kang, and A. C. Mueller, "Tabflex: Scaling tabular learning to millions with linear attention," in NeurIPS Workshop, 2024. 19, 21 +[293] S. K. Baur and S. Kim, “Exploration of autoregressive models for in-context learning on tabular data,” in NeurIPS Workshop, 2024. 19 +[294] M. Arbel, D. Salinas, and F. Hutter, "Equitabpfn: A target-permutation equivariant prior fitted networks," CoRR, vol. abs/2502.06684, 2025. 19 +[295] Y. Sun, X. Wen, S. Zheng, X. Jia, and J. 
Bian, "Scaling generative tabular learning for large language models," in NeurIPS Workshop, 2024. 19 +[296] Y. Freund, R. E. Schapire et al., "Experiments with a new boosting algorithm," in ICML, vol. 96, 1996, pp. 148-156. 19 +[297] Z.-H. Zhou, Ensemble methods: foundations and algorithms. CRC press, 2012. 19 +[298] Y. Wen, D. Tran, and J. Ba, "Batchsemble: an alternative approach to efficient ensemble and lifelong learning," in ICLR, 2020. 19 +[299] M. Jayawardhana, Renbo, S. Dooley, V. Cherepanova, A. G. Wilson, F. Hutter, C. White, T. Goldstein, and M. Goldblum, "Transformers boost the performance of decision trees on tabular data across sample sizes," CoRR, vol. abs/2502.02672v2, 2025. 19 +[300] R. Caruana, A. Munson, and A. Niculescu-Mizil, “Getting the most out of ensemble selection,” in ICDM, 2006, pp. 828-833. 20 +[301] Y. Wang, B. Jiang, Y. Guo, Q. Gan, D. Wipf, X. Huang, and X. Qiu, "Prior-fitted networks scale to larger datasets when treated as weak learners," CoRR, vol. abs/2503.01256, 2025. 20 +[302] J. C. Gower, "A general coefficient of similarity and some of its properties," Biometrics, pp. 857-871, 1971. 20 +[303] F. T. Liu, K. M. Ting, and Z.-H. Zhou, "Isolation forest," in ICDM, 2008, pp. 413-422. 20 +[304] M. M. Breunig, H.-P. Kriegel, R. T. Ng, and J. Sander, “Lof: identifying density-based local outliers,” in SIGMOD, 2000, pp. 93-104. 20 +[305] T. Shenkar and L. Wolf, "Anomaly detection for tabular data with internal contrastive learning," in ICLR, 2022. 20 +[306] A. Li, Y. Zhao, C. Qiu, M. Kloft, P. Smyth, M. Rudolph, and S. Mandt, "Anomaly detection of tabular data using llms," CoRR, vol. abs/2406.16308, 2024. 20 +[307] C. Lee, J. Kim, and N. Park, "Codi: Co-evolving contrastive diffusion models for mixed-type tabular synthesis," in ICML, 2023, pp. 18940-18956. 20 +[308] R. Tu, Z. Senane, L. Cao, C. Zhang, H. Kjellström, and G. E. Henter, "Causality for tabular data synthesis: A high-order structure causal benchmark framework," CoRR, vol. 
abs/2406.08311, 2024. 20 +[309] R. Feinman and B. M. Lake, "Generating new concepts with hybrid neuro-symbolic models," CoRR, vol. abs/2003.08978, 2020. 20 +[310] T. Hastie, “The elements of statistical learning: data mining, inference, and prediction,” 2009. 20 +[311] B. M. Greenwell et al., "pdp: An r package for constructing partial dependence plots," R Journal, vol. 9, no. 1, p. 421, 2017. 20 +[312] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, C.-S. Chen, and D. T.-H. Chang, "Dofen: Deep oblivious forest ensemble," in NeurIPS, 2024, pp. 44624-44677. 20 +[313] B. Sun and K. Saenko, "Deep CORAL: correlation alignment for deep domain adaptation," in ECCV Workshops (3), 2016, pp. 443-450. 20 +[314] C. Kim, T. Kim, S. Woo, J. Y. Yang, and E. Yang, "Adaptable: Test-time adaptation for tabular data via shift-aware uncertainty cali + +brator and label distribution handler," CoRR, vol. abs/2407.10784, 2024. 20 +[315] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. S. Lempitsky, "Domain-adversarial training of neural networks," J. Mach. Learn. Res., vol. 17, pp. 59:1-59:35, 2016. 20 +[316] S. Sagawa, P. W. Koh, T. B. Hashimoto, and P. Liang, "Distribu-tionally robust neural networks," in ICLR, 2020. 20 +[317] D. Levy, Y. Carmon, J. C. Duchi, and A. Sidford, "Large-scale methods for distributionally robust optimization," in NeurIPS, 2020, pp. 8847-8860. 20 +[318] J. Zhang, A. K. Menon, A. Veit, S. Bhojanapalli, S. Kumar, and S. Sra, "Coping with label shift via distributionally robust optimisation," in ICLR, 2021. 20 +[319] H.-R. Cai and H.-J. Ye, "Understanding the limits of deep tabular methods with temporal shift," CoRR, vol. abs/2502.20260, 2025. 21 +[320] W. Huang, "Multimodal contrastive learning and tabular attention for automated alzheimers disease prediction," in ICCV (Workshops), 2023, pp. 2465-2474. 21 +[321] S. Du, S. Zheng, Y. Wang, W. Bai, D. P. O'Regan, and C. 
Qin, "Tip: Tabular-image pre-training for multimodal classification with incomplete data," in ECCV, 2024, pp. 478-496. 21 +[322] A. Gilani, S. R. Qasim, I. Malik, and F. Shafait, "Table detection using deep learning," in ICDAR, 2017, pp. 771-776. 21 +[323] M. Li, L. Cui, S. Huang, F. Wei, M. Zhou, and Z. Li, "Tablebank: Table benchmark for image-based table detection and recognition," in LREC, 2020, pp. 1918-1925. 21 +[324] S. Schreiber, S. Agne, I. Wolf, A. Dengel, and S. Ahmed, "Deepdesrt: Deep learning for detection and structure recognition of tables in document images," in ICDAR, 2017, pp. 1162-1167. 21 +[325] M. s. Kasem, A. Abdallah, A. Berendeyev, E. Elkady, M. Mahmoud, M. Abdalla, M. Hamada, S. Vascon, D. Nurseitov, and I. Taj-eddin, "Deep learning for table detection and structure recognition: A survey," ACM Computing Surveys, vol. 56, no. 12, pp. 1-41, 2024. 21 +[326] W. Chen, M.-W. Chang, E. Schlinger, W. Wang, and W. W. Cohen, "Open question answering over tables and text," CoRR, vol. abs/2010.10439, 2020. 21 +[327] A. Talmor, O. Yoran, A. Catav, D. Lahav, Y. Wang, A. Asai, G. Ilharco, H. Hajishirzi, and J. Berant, "Multimodalqa: Complex question answering over text, tables and images," CoRR, vol. abs/2104.06039, 2021. 21 +[328] S. Appalaraju, B. Jasani, B. U. Kota, Y. Xie, and R. Manmatha, "Docformer: End-to-end transformer for document understanding," in ICCV, 2021, pp. 993-1003. 21 +[329] C. Da, P. Wang, and C. Yao, "Multi-granularity prediction with learnable fusion for scene text recognition," CoRR, vol. abs/2307.13244, 2023. 21 +[330] Z. Gu, C. Meng, K. Wang, J. Lan, W. Wang, M. Gu, and L. Zhang, "Xylayoutlm: Towards layout-aware multimodal networks for visually-rich document understanding," in CVPR, 2022, pp. 4583-4592. 21 +[331] A. Nassar, N. Livathinos, M. Lysak, and P. Staar, "Tableformer: Table structure understanding with transformers," in CVPR, 2022, pp. 4614-4623. 21 +[332] G. Kim, T. Hong, M. Yim, J. Park, J. Yim, W. Hwang, S. Yun, D. 
Han, and S. Park, "Donut: Document understanding transformer withoutOCR," CoRR, vol. abs/2111.15664, 2021. 21 +[333] H. Feng, Z. Wang, J. Tang, J. Lu, W. Zhou, H. Li, and C. Huang, "Unidoc: A universal large multimodal model for simultaneous text detection, recognition, spotting and understanding," CoRR, vol. abs/2308.11592, 2023. 21 +[334] J. Wan, S. Song, W. Yu, Y. Liu, W. Cheng, F. Huang, X. Bai, C. Yao, and Z. Yang, "Omniparser: A unified framework for text spotting key information extraction and table recognition," in CVPR, 2024, pp. 15641-15653. 21 +[335] W. Zhao, H. Feng, Q. Liu, J. Tang, S. Wei, B. Wu, L. Liao, Y. Ye, H. Liu, W. Zhou et al., "Tabpedia: Towards comprehensive visual table understanding with concept synergy," CoRR, vol. abs/2406.01326, 2024. 21 +[336] Z. Li, B. Yang, Q. Liu, Z. Ma, S. Zhang, J. Yang, Y. Sun, Y. Liu, and X. Bai, "Monkey: Image resolution and text label are important things for large multi-modal models," in CVPR, 2024, pp. 26763-26773. 21 + +[337] Y. Liu, B. Yang, Q. Liu, Z. Li, Z. Ma, S. Zhang, and X. Bai, "Textmonkey: AnOCR-free large multimodal model for understanding document," CoRR, vol. abs/2403.04473, 2024. 21 +[338] J. Ye, A. Hu, H. Xu, Q. Ye, M. Yan, Y. Dan, C. Zhao, G. Xu, C. Li, J. Tian et al., "mplug-docowl: Modularized multimodal large language model for document understanding," CoRR, vol. abs/2307.02499, 2023. 21 +[339] N. Deng, Z. Sun, R. He, A. Sikka, Y. Chen, L. Ma, Y. Zhang, and R. Mihalcea, "Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data," CoRR, vol. abs/2402.12424, 2024. 21 +[340] Z.-H. Zhou, "Open-environment machine learning," National Science Review, vol. 9, no. 8, p. nwac123, 07 2022. 21 +[341] W. Ren, X. Li, H. Chen, V. Rakesh, Z. Wang, M. Das, and V. G. Honavar, "Tablog: Test-time adaptation for tabular data using logic rules," in ICML, 2024, pp. 42417-42427. 21 +[342] J. Kaplan, S. McCandlish, T. Henighan, T. B. Brown, B. Chess, + +R. 
Child, S. Gray, A. Radford, J. Wu, and D. Amodei, "Scaling laws for neural language models," CoRR, vol. abs/2001.08361, 2020. 21 +[343] Z.-H. Zhou, "Learnware: on the future of machine learning," Frontiers of Computer Science, vol. 10, no. 4, pp. 589-590, 2016. 21 +[344] Z.-H. Zhou and Z.-H. Tan, "Learnware: small models do big," Science China Information Science, vol. 67, no. 1, 2024. 21 +[345] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, "Annotatedtables: A large tabular dataset with language model annotations," CoRR, vol. abs/2406.16349, 2024. 21 +[346] Z.-H. Zhou, "Learnability with time-sharing computational resource concerns," National Science Review, vol. 11, no. 10, p. nwae204, 06 2024. 22 +[347] W. Liang, Y. Zhang, Y. Kwon, S. Yeung, and J. Y. Zou, "Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning," in NeurIPS, 2022. 22 \ No newline at end of file diff --git a/data/2025/2504_16xxx/2504.16109/images/0143006dcbf94506f23c4872c5ddb3dfd3a832c6465dc6e88fc3fcdfe7b31008.jpg b/data/2025/2504_16xxx/2504.16109/images/0143006dcbf94506f23c4872c5ddb3dfd3a832c6465dc6e88fc3fcdfe7b31008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4b254e454c23a80f21a0088773467b89179908f --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/0143006dcbf94506f23c4872c5ddb3dfd3a832c6465dc6e88fc3fcdfe7b31008.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df9eceec878ea417069316a82b51e4b614e7c30a9fd9e6b19ce0bdeecee779a8 +size 24716 diff --git a/data/2025/2504_16xxx/2504.16109/images/0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg b/data/2025/2504_16xxx/2504.16109/images/0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f46d13740e93090cc291acb260d8475a48a7d41 --- /dev/null +++ 
b/data/2025/2504_16xxx/2504.16109/images/0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95b91aff1c1da5fd2934f1ec76ff7532cfc5519ef7fbe472ed884ac52efa2b6f +size 15135 diff --git a/data/2025/2504_16xxx/2504.16109/images/0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg b/data/2025/2504_16xxx/2504.16109/images/0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ed7b4a4792234cd5fa455c7f4fbf965614d44d4 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3accebebc1a6a76ce45d0bd0b82477a9ed82a052c54eab5a139c9a48574fc225 +size 107722 diff --git a/data/2025/2504_16xxx/2504.16109/images/12771ab1437687a7b7f213e1ce16fc681b1d4d6741ca4a2572a2e60c0e8323f5.jpg b/data/2025/2504_16xxx/2504.16109/images/12771ab1437687a7b7f213e1ce16fc681b1d4d6741ca4a2572a2e60c0e8323f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..566df1605b4fdc75d20593a53f6fce07fcf5a688 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/12771ab1437687a7b7f213e1ce16fc681b1d4d6741ca4a2572a2e60c0e8323f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5423b943b3044e423a077406db03ec6cdeb95b98165ebbe659611d409a4932d3 +size 8594 diff --git a/data/2025/2504_16xxx/2504.16109/images/172234d4f3ca90801d6fb35295f332435c76d1756e9f1a75e41d12052939df1d.jpg b/data/2025/2504_16xxx/2504.16109/images/172234d4f3ca90801d6fb35295f332435c76d1756e9f1a75e41d12052939df1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8bbaa57802eef0bf979bb506a633f57f124f7eb --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/172234d4f3ca90801d6fb35295f332435c76d1756e9f1a75e41d12052939df1d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cf993ef6df1c25426beb04ec7532b85a7bb4399711118611cb28ac6ed3ebd7bf +size 116084 diff --git a/data/2025/2504_16xxx/2504.16109/images/1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg b/data/2025/2504_16xxx/2504.16109/images/1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a64bec2ff94e31556a518fe09f0e1d8ac605629d --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b48c67d573162db6fd8e4dc86e8a7593643d0c43c07695772f948e526a3d8694 +size 19428 diff --git a/data/2025/2504_16xxx/2504.16109/images/2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg b/data/2025/2504_16xxx/2504.16109/images/2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06bf6ccbc20930df006833527f38676b23766ef2 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9a4e907ce3069e7ec3d8f15f757337e45fcf5ff2ccaa9764b57fbbfc95896fb +size 13735 diff --git a/data/2025/2504_16xxx/2504.16109/images/401e471e9a4b49c711e12c51ac59ccef0dc6d84507aa9ade4b619df4f6692731.jpg b/data/2025/2504_16xxx/2504.16109/images/401e471e9a4b49c711e12c51ac59ccef0dc6d84507aa9ade4b619df4f6692731.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80274526ac672d3e00768802cff48e2a511864ae --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/401e471e9a4b49c711e12c51ac59ccef0dc6d84507aa9ade4b619df4f6692731.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05debf1a27453eef816f7e52fda7ff0543d139b4c875eb08a9481ed117e489c2 +size 6290 diff --git 
a/data/2025/2504_16xxx/2504.16109/images/4905a666543e6173e42b6e66fa5fa25130cc3fda26fc641421ab2bcfc9714cc7.jpg b/data/2025/2504_16xxx/2504.16109/images/4905a666543e6173e42b6e66fa5fa25130cc3fda26fc641421ab2bcfc9714cc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa00c87848ce10d7d69a2adde088e20033d5efe4 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/4905a666543e6173e42b6e66fa5fa25130cc3fda26fc641421ab2bcfc9714cc7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3adf4a3e9c153cfa7db658cdb43b5e44e823cdeb6509ffe34b3d564eed604685 +size 3123 diff --git a/data/2025/2504_16xxx/2504.16109/images/58235d84006ce990581e96726220630221697514d510637a9cf60a4a939d7036.jpg b/data/2025/2504_16xxx/2504.16109/images/58235d84006ce990581e96726220630221697514d510637a9cf60a4a939d7036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ba354fa79228b7921c7fba72854ec33baa868a0 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/58235d84006ce990581e96726220630221697514d510637a9cf60a4a939d7036.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5d8f042a39df7d846bc014b262908a49c42ed1acfb6c3d51b71833c5ad91541 +size 3709 diff --git a/data/2025/2504_16xxx/2504.16109/images/65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg b/data/2025/2504_16xxx/2504.16109/images/65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2f12dc9dc210bf8b60a959667c178ac0efe5e86 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44b1d8d56c71f7382c05ee99b805eb7b1f9dddb77d972500b2e60c6f451a7fc8 +size 173489 diff --git a/data/2025/2504_16xxx/2504.16109/images/66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg 
b/data/2025/2504_16xxx/2504.16109/images/66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9716db4191b022a63b76cc19fa302f8c05c540c4 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d6d41953e9bd87d3f91528b00f81e3023a179acf795b2781ed30664f9a2d0c5 +size 21946 diff --git a/data/2025/2504_16xxx/2504.16109/images/6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg b/data/2025/2504_16xxx/2504.16109/images/6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e542d92c8e2485ac8fcba2d9b391688808a15ed8 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5144fb5a0e9840b0d82e28be262c1415562e41fc7027f9f21b4ca4f1dd0d987 +size 9259 diff --git a/data/2025/2504_16xxx/2504.16109/images/6d4e9319095dcfc6e9b34428b411b9cf86fb7fec061631cdcce9c8f786e2f101.jpg b/data/2025/2504_16xxx/2504.16109/images/6d4e9319095dcfc6e9b34428b411b9cf86fb7fec061631cdcce9c8f786e2f101.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64ac9c7917b48543cf79bfce9e3220fffbd34f55 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/6d4e9319095dcfc6e9b34428b411b9cf86fb7fec061631cdcce9c8f786e2f101.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20e62423628359fd05dc3e5e995f62b7bec79ca3948238736151ca4fb13e0af4 +size 5050 diff --git a/data/2025/2504_16xxx/2504.16109/images/886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg b/data/2025/2504_16xxx/2504.16109/images/886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2a4d635f9af807e4be204c2edbda02c8645e25a4 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb21da82a137d6a99f44d4440cf2e31ad37751f8c3f4c8be6311acdb84ed1500 +size 25620 diff --git a/data/2025/2504_16xxx/2504.16109/images/8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg b/data/2025/2504_16xxx/2504.16109/images/8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb3a2d794b9d34c5b4b560a60bc743865363332c --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c6711ec0d70fc25d40fc2024a8b396ef8a25ba1ea2849d6a3128c839e88232d +size 8634 diff --git a/data/2025/2504_16xxx/2504.16109/images/ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg b/data/2025/2504_16xxx/2504.16109/images/ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f57006535c1fa5e39f6b6f3407ab8889fae85c0 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd54cd0cc57921f26d4f87bbdcfbf704c8a33c1fc8c8cf01974d2fd3460dd8d7 +size 8249 diff --git a/data/2025/2504_16xxx/2504.16109/images/d744b1c13f37ff56106565f2e4ea019e84225de34f2718e162eac86475307dcf.jpg b/data/2025/2504_16xxx/2504.16109/images/d744b1c13f37ff56106565f2e4ea019e84225de34f2718e162eac86475307dcf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..855a9915c8740b6eb32e719315a187c993d8afba --- /dev/null +++ 
b/data/2025/2504_16xxx/2504.16109/images/d744b1c13f37ff56106565f2e4ea019e84225de34f2718e162eac86475307dcf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce503aa786692a728ac8e29ac3c42f911dbc842314acaed267172f0e50736c23 +size 4172 diff --git a/data/2025/2504_16xxx/2504.16109/images/e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg b/data/2025/2504_16xxx/2504.16109/images/e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4954ba8fcb2c6914b59994e7bfe44de1bbd2684d --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05e1ebbb2e74578c0236f2d9b0ea56417114a3101ca12083592a02d0442134ae +size 37023 diff --git a/data/2025/2504_16xxx/2504.16109/images/e77a8beab13b7e0d0e9ecbd709d979618031cd2624a214a28ac3e617bc4ed51b.jpg b/data/2025/2504_16xxx/2504.16109/images/e77a8beab13b7e0d0e9ecbd709d979618031cd2624a214a28ac3e617bc4ed51b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7fcc5b5eeeaaac633c61fc2971cb483e11b92272 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/e77a8beab13b7e0d0e9ecbd709d979618031cd2624a214a28ac3e617bc4ed51b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e169f5086265b9a83fbbdf2e4db2c8198703ff29c068b869df8a834cfdadd78 +size 3463 diff --git a/data/2025/2504_16xxx/2504.16109/images/f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg b/data/2025/2504_16xxx/2504.16109/images/f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fa541d023cd462d845d4d959fd622fae60bb5fc --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5c8eab65c813bea0bcb9b96b782f17fee24dc80b036f8e3c1425eb3604e81ff6 +size 10086 diff --git a/data/2025/2504_16xxx/2504.16109/images/f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg b/data/2025/2504_16xxx/2504.16109/images/f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06600edaffe69544e275d2c0ee79836b5bd65631 --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/images/f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f282640bc05942ae7d9e459d0b5856394e3aa19b143cbd72351620e1c5fface +size 859 diff --git a/data/2025/2504_16xxx/2504.16109/layout.json b/data/2025/2504_16xxx/2504.16109/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..068e48c19df5441cb147196b0546dbbbd6830f3c --- /dev/null +++ b/data/2025/2504_16xxx/2504.16109/layout.json @@ -0,0 +1,27441 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 83, + 52, + 531, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 52, + 531, + 108 + ], + "spans": [ + { + "bbox": [ + 83, + 52, + 531, + 108 + ], + "type": "text", + "content": "Representation Learning for Tabular Data: A Comprehensive Survey" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 120, + 465, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 120, + 465, + 133 + ], + "spans": [ + { + "bbox": [ + 143, + 120, + 465, + 133 + ], + "type": "text", + "content": "Jun-Peng Jiang, Si-Yang Liu, Hao-Run Cai, Qile Zhou, Han-Jia Ye" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 152, + 546, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 152, + 546, + 308 + ], + "spans": [ + { + "bbox": [ + 64, + 152, + 546, + 308 + ], + "type": "text", + "content": "Abstract—Tabular data, 
structured as rows and columns, is among the most prevalent data types in machine learning classification and regression applications. Models for learning from tabular data have continuously evolved, with Deep Neural Networks (DNNs) recently demonstrating promising results through their capability of representation learning. In this survey, we systematically introduce the field of tabular representation learning, covering the background, challenges, and benchmarks, along with the pros and cons of using DNNs. We organize existing methods into three main categories according to their generalization capabilities: specialized, transferable, and general models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. We introduce a hierarchical taxonomy for specialized models based on the key aspects of tabular data—features, samples, and objectives—and delve into detailed strategies for obtaining high-quality feature- and sample-level representations. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks, leveraging knowledge acquired from homogeneous or heterogeneous sources, or even cross-modalities such as vision and language. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning. We group these general models based on the strategies used to adapt across heterogeneous datasets. Additionally, we explore ensemble methods, which integrate the strengths of multiple tabular models. Finally, we discuss representative extensions of tabular learning, including open-environment tabular machine learning, multimodal learning with tabular data, and tabular understanding tasks. More information can be found in the following repository: https://github.com/LAMDA-Tabular/Tabular-Survey." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 317, + 538, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 317, + 538, + 328 + ], + "spans": [ + { + "bbox": [ + 64, + 317, + 538, + 328 + ], + "type": "text", + "content": "Index Terms—Tabular Data, Representation Learning, Deep Tabular Learning, Tabular Machine Learning, Tabular Foundation Model" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 300, + 334, + 309, + 342 + ], + "blocks": [ + { + "bbox": [ + 300, + 334, + 309, + 342 + ], + "lines": [ + { + "bbox": [ + 300, + 334, + 309, + 342 + ], + "spans": [ + { + "bbox": [ + 300, + 334, + 309, + 342 + ], + "type": "image", + "image_path": "f93fe1bc007c8bd98d2af02027122592dc474f533fa54a8f3792835c82ce9f57.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 376, + 140, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 376, + 140, + 387 + ], + "spans": [ + { + "bbox": [ + 45, + 376, + 140, + 387 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 392, + 302, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 392, + 302, + 496 + ], + "spans": [ + { + "bbox": [ + 44, + 392, + 302, + 496 + ], + "type": "text", + "content": "Tabular data, characterized by structured rows and columns, is one of the most prevalent data formats in real-world machine learning applications, spanning diverse domains such as finance [1], healthcare [2], education [3], recommendation systems [4], and scientific research. In particular, AI for scientific research (AI4science) has increasingly relied on tabular data, as numerous prominent datasets—such as those from genomics [5], chemistry [6], and climate science [7], [8]—naturally adopt tabular forms." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 497, + 302, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 497, + 302, + 682 + ], + "spans": [ + { + "bbox": [ + 44, + 497, + 302, + 682 + ], + "type": "text", + "content": "Tabular data inherently organizes information in a structured, table-like format. In this survey, we focus primarily on supervised tabular machine learning tasks, specifically classification and regression. Beyond their structured organization, tabular datasets frequently include heterogeneous attributes [9], encompassing numerical, categorical, or mixed data types that may be either dense or sparse. Additionally, many tabular datasets present quality challenges, such as noisy measurements, missing values, outliers, inaccuracies [10], and privacy constraints [11], all of which complicate the modeling process. The most common supervised tabular tasks are classification and regression, where the goal is to learn mappings from training data to discrete or continuous targets, respectively. As illustrated in Figure 1, each row represents an instance (with its corresponding label), while each column corresponds to a specific attribute or feature [12]." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 312, + 376, + 561, + 510 + ], + "blocks": [ + { + "bbox": [ + 312, + 376, + 561, + 510 + ], + "lines": [ + { + "bbox": [ + 312, + 376, + 561, + 510 + ], + "spans": [ + { + "bbox": [ + 312, + 376, + 561, + 510 + ], + "type": "image", + "image_path": "e4038b53b6a263f14d47247efe8303fc0326a04166e3783818d71299bc25664d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 517, + 566, + 588 + ], + "lines": [ + { + "bbox": [ + 308, + 517, + 566, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 517, + 566, + 588 + ], + "type": "text", + "content": "Figure 1: A brief introduction to tabular data and associated learning tasks. 
Each row represents an instance and each column corresponds to a specific attribute or feature, which can be numerical or categorical. The most common tabular machine learning tasks are classification and regression as shown in the right side of the figure." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 595, + 566, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 595, + 566, + 631 + ], + "spans": [ + { + "bbox": [ + 308, + 595, + 566, + 631 + ], + "type": "text", + "content": "Ideally, learned mappings should generalize effectively, accurately predicting outcomes for new instances drawn from the same underlying distribution." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 631, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 631, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 631, + 565, + 748 + ], + "type": "text", + "content": "Machine learning methods for tabular data have evolved significantly over the years [13], [14], [15], [16]. Recently, the rise of deep learning has profoundly impacted domains like computer vision [17] and natural language processing [18], where Deep Neural Networks (DNNs) extract semantic representations directly from raw inputs [19], [20], [21]. These learned representations have not only improved generalization but have also facilitated knowledge transfer across related tasks [22]. The flexibility of DNNs in modeling complex feature interactions and learning rich hierarchical" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.16109v1 [cs.LG] 17 Apr 2025" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 694, + 301, + 742 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 694, + 301, + 742 + ], + "spans": [ + { + "bbox": [ + 44, + 694, + 301, + 742 + ], + "type": "text", + "content": "- J.-P. Jiang, S.-Y Liu, H.-R Cai, Q. Zhou, and H.-J. Ye are with School of Artificial Intelligence, Nanjing University, and National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China. 
E-mail: {jiangjp.liusy,zhouql.yehj}@lamda.nju.edu.cn, caihr@smail.nju.edu.cn" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 46, + 553, + 285 + ], + "blocks": [ + { + "bbox": [ + 47, + 46, + 553, + 285 + ], + "lines": [ + { + "bbox": [ + 47, + 46, + 553, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 46, + 553, + 285 + ], + "type": "image", + "image_path": "0d9d9cb1c1ca63909087b16acb67b2648855940519a0c15a4594452e098aa2db.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 293, + 566, + 353 + ], + "lines": [ + { + "bbox": [ + 44, + 293, + 566, + 353 + ], + "spans": [ + { + "bbox": [ + 44, + 293, + 566, + 353 + ], + "type": "text", + "content": "Figure 2: We organize existing tabular classification/regression methods into three categories according to their generalization capabilities: specialized (left), transferable (middle), and general (right) models. Specialized models focus on tasks where training and evaluation occur within the same data distribution. Transferable models are pre-trained on one or more datasets and subsequently fine-tuned on downstream tasks. General models, also known as tabular foundation models, extend this concept further, allowing direct application to downstream tasks without additional fine-tuning." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 358, + 301, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 358, + 301, + 381 + ], + "spans": [ + { + "bbox": [ + 44, + 358, + 301, + 381 + ], + "type": "text", + "content": "structures has inspired significant interest in adapting deep learning techniques to tabular data." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 384, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 384, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 44, + 384, + 301, + 581 + ], + "type": "text", + "content": "Indeed, DNNs were applied to tabular data decades ago, initially targeting dimensionality reduction and visualization tasks [23], [24], [25], [26], yet they typically struggled to match tree-based methods on standard classification and regression problems. Later advances in DNNs have led to significant improvements across various tabular-related applications, such as click-through rate prediction [27], [28], anomaly detection [29], recommendation systems [30], and time series forecasting [31], [32]. Modern deep learning approaches, benefiting from better-designed architectures, optimized training strategies, high-quality representations, have revitalized DNN performance on tabular data, often rivaling or surpassing traditional tree-based models [33], [34], [35]. Given the wide variety of approaches emerging in deep tabular modeling, a systematic overview that revisits critical factors and current methodologies in representation learning for tabular data has become increasingly necessary." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 582, + 301, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 582, + 301, + 698 + ], + "spans": [ + { + "bbox": [ + 44, + 582, + 301, + 698 + ], + "type": "text", + "content": "This survey begins by introducing the background of tabular data learning, highlighting the challenges involved and critically examining the advantages and limitations of utilizing DNNs compared to classical—particularly tree-based—methods [36], [37], [38], [39]. 
Given the observed instability of method performance across different tabular datasets, we also discuss comprehensive strategies for dataset collection, evaluation, and analysis, aiming to establish robust criteria for aggregating performance metrics across multiple datasets [40], [41], [42], [43]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "type": "text", + "content": "We broadly categorize deep tabular methods into three types: specialized methods, transferable methods, and general methods, distinguished by the scope of datasets on which they are trained and deployed, as well as their corresponding" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 358, + 567, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 358, + 567, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 358, + 567, + 635 + ], + "type": "text", + "content": "generalization capabilities (illustrated in Figure 2). Specialized tabular methods align closely with classical supervised models, typically trained and evaluated on data drawn from the same distribution. In contrast, transferable methods leverage knowledge from models pre-trained on one or multiple source datasets, subsequently fine-tuning these models on target datasets; the primary challenge here lies in addressing the heterogeneity between pre-trained sources and target tasks. The recently proposed general tabular methods—motivated by the remarkable \"zero-shot\" generalization abilities demonstrated by large language models (LLMs)—exhibit exceptional versatility. These general models can directly apply their learned representations to downstream tabular datasets without additional fine-tuning, achieving robust generalization due to advanced pre-training strategies. 
Although the generalization ability tends to increase from specialized to general models, it does not imply that specialized or transferable methods are less valuable; specialized models remain superior on large-scale datasets, and fine-tuning general models can further improve their predictive performance. Additionally, the first two types of methods provide foundational insights and valuable components that contribute significantly to advancements in general tabular models." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 643, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 643, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 643, + 566, + 748 + ], + "type": "text", + "content": "For specialized methods, numerous designs have been proposed from diverse perspectives, and previous papers have often categorized these methods based primarily on their architectural characteristics or behaviors. Existing taxonomies [44], for example, group specialized methods into feature-preprocessing-based [33], [45], data-augmentation-based [46], [47], [48], [49], MLP variants [50], [34], specialized DNN architectures [51], [52], [53], [54], [55], [56], [57], [58], tree-mimic approaches [59], [60], [61], token-based tech" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 565, + 34 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 41, + 301, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 41, + 301, + 191 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 301, + 191 + ], + "type": "text", + "content": "niques [62], [63], [33], [64], [65], regularization-driven methods [66], [67], and neighborhood-based strategies [68], [69], [35]. However, such categorizations can appear scattered, making it difficult to connect the core ideas between methods placed in distinct groups. In contrast, this survey introduces a hierarchical taxonomy based on the key aspects of tabular data—features, samples, and objectives—providing a cohesive organizational framework. Our approach emphasizes detailed strategies for obtaining high-quality representations at both feature- and sample-levels. This unified perspective helps bridge core ideas across diverse methods, facilitating clearer comparative discussions and potentially guiding the design of future, more advanced tabular models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 193, + 301, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 193, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 193, + 301, + 492 + ], + "type": "text", + "content": "Instead of training a model from scratch on a single tabular dataset, transferable models leverage knowledge encoded in a pre-trained model from another dataset, which can significantly enhance the training process, especially when data or computational resources for the target task are limited. 
A major challenge in transferring knowledge across tabular tasks lies in the inherent heterogeneity between the source and target datasets, particularly differences in their feature and label spaces. In this survey, we adopt a broad perspective on transferable tabular models, categorizing methods based on the sources of their pre-trained knowledge. Specifically, we discuss models pre-trained on homogeneous tabular domains, such as self-supervised methods with additional pre-training steps on the target dataset itself [70], [71]; models pre-trained across heterogeneous tabular domains [72], [73], [64]; and methods transferring knowledge from other modalities, such as vision-based pre-trained models [74], [75], [76]. Additionally, since incorporating attribute semantics (when available) is a common strategy for bridging heterogeneous attribute spaces across tabular datasets [77], [78], [79], we also explore approaches leveraging language models in the final category. In particular, we further organize these language model-based strategies according to the methods they use to extract knowledge and the types of language models involved—ranging from small-scale language models to Large Language Models (LLMs) [80], [81], [82], [83]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 492, + 301, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 492, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 44, + 492, + 301, + 734 + ], + "type": "text", + "content": "Inspired by recent advancements in foundation models from vision and language domains [84], [85], general models—also known as tabular foundation models—expand the concept of transferable tabular models by enabling direct application to downstream tasks without additional fine-tuning. This capability, commonly referred to as the model's \"zero-shot\" ability, significantly enhances the model's usability across diverse tabular datasets. 
In contrast to transferable models, which primarily focus on bridging knowledge gaps between source and target datasets, general models aim to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. We categorize these general models based on the strategies used to achieve adaptiveness across diverse tabular tasks, specifically examining adaptations from both data-centric [86] and model-centric perspectives [87], [88]. Furthermore, we discuss critical branches of general tabular models in detail: the TabPFN variants leveraging in-context learning [89], [90], [91], and methods utilizing attribute and task semantics to unify heterogeneous tasks within a common representation framework [92], [93], [94]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "type": "text", + "content": "Additionally, ensemble methods [95], [96], [91] are in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 42, + 566, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 170 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 170 + ], + "type": "text", + "content": "produced, which improve the generalization ability based on the strengths of multiple tabular models. Finally, we briefly overview other relevant extensions of tabular learning, including clustering [97], [98], anomaly detection [99], [100], [101], data generation and imputation [102], [103], [104], interpretability [63], [105], [61], multimodal learning [106], [107], open-environment tabular machine learning [108], [109], [110], [111], and tabular understanding [112], [113]. 
By summarizing the state of the field and identifying open challenges, we aim to guide future research and applications in tabular data representation learning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 185, + 400, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 185, + 400, + 196 + ], + "spans": [ + { + "bbox": [ + 309, + 185, + 400, + 196 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 200, + 565, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 200, + 565, + 247 + ], + "spans": [ + { + "bbox": [ + 308, + 200, + 565, + 247 + ], + "type": "text", + "content": "This section presents the (supervised) tabular machine learning task, including the notation of tabular data learning, the history of tabular data, the challenges of learning from tabular data, evaluation metrics, and tabular benchmarks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 261, + 456, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 261, + 456, + 273 + ], + "spans": [ + { + "bbox": [ + 309, + 261, + 456, + 273 + ], + "type": "text", + "content": "2.1 Learning with Tabular Data" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": "A supervised tabular dataset is formatted as " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " examples and " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " features/attributes corresponding to " + 
}, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " rows and " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " columns in the table. An instance " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " is depicted by its " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " feature values. Assume " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "x_{i,j}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " as the " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": "-th feature of instance " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": ", it could be a numerical (continuous) one " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\mathrm{num}}\\in \\mathbb{R}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": ", like the temperature of a region or the density of the object. 
" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " can also be a categorical (discrete) value " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\mathrm{cat}}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": ", like one of multiple colors, the location of a person, or even some textual descriptions of the instance. Each instance is associated with a label " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "y_i\\in \\{1, - 1\\}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " in a binary classification task, " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "y_i\\in [C] = \\{1,\\dots ,C\\}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " in a multi-class classification task, and " + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "inline_equation", + "content": "y_i\\in \\mathbb{R}" + }, + { + "bbox": [ + 308, + 275, + 565, + 415 + ], + "type": "text", + "content": " in a regression task." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 416, + 566, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 416, + 566, + 498 + ], + "spans": [ + { + "bbox": [ + 309, + 416, + 566, + 498 + ], + "type": "text", + "content": "Remark 1. Ordinal regression [114], [115], also called ordinal classification, is a type of regression analysis used to predict an ordinal variable. 
It can be considered an intermediate problem between regression and classification. However, this survey primarily focuses on standard classification and regression tasks and does not specifically discuss ordinal regression." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "spans": [ + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": "Given a tabular dataset " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": ", we aim to learn a mapping " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": " that maps " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": " to its label " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": ". In other words, the model predicts " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "\\hat{y}_i = f(x_i)" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": ". 
The general objective learning " + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 502, + 565, + 549 + ], + "type": "text", + "content": " follows the structural risk minimization:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 351, + 554, + 565, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 554, + 565, + 580 + ], + "spans": [ + { + "bbox": [ + 351, + 554, + 565, + 580 + ], + "type": "interline_equation", + "content": "\\min _ {f} \\sum_ {\\left(\\boldsymbol {x} _ {i}, y _ {i}\\right) \\in \\mathcal {D}} \\ell (y, \\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}\\right)) + \\Omega (f). \\tag {1}", + "image_path": "401e471e9a4b49c711e12c51ac59ccef0dc6d84507aa9ade4b619df4f6692731.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "\\ell (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": " measures the discrepancy between the predicted label " + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": " and the true label " + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "y_{i},e.g." + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": ", cross-entropy in classification and mean square error in regression. 
" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "\\Omega (\\cdot)" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": " is the regularization on the model, which restricts the complexity of " + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": ". We expect the learned " + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": " is able to extend its ability to unseen instances sampled from the same distribution as " + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 308, + 584, + 564, + 654 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "text", + "content": "Tabular methods differ in their strategies to implement " + }, + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "text", + "content": ". The \"dummy\" approach makes predictions based on training labels " + }, + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "inline_equation", + "content": "\\{y_i\\}_{i=1}^N" + }, + { + "bbox": [ + 308, + 654, + 565, + 712 + ], + "type": "text", + "content": " directly, which outputs the major class in the training set for classification and the average of all labels for regression, respectively." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": "In a " + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": "-class classification task, classical parametric methods implement " + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": " with a linear mapping, i.e., " + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i) = \\pmb{W}^\\top \\pmb{x}_i + \\pmb{b}" + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": ", where the classifier " + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "inline_equation", + "content": "\\pmb{W} \\in \\mathbb{R}^{d \\times C}" + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "inline_equation", + "content": "\\pmb{b} \\in \\mathbb{R}^C" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "content": "is the bias. With different loss functions, we can implement Logistic Regression, SVM, or even AdaBoost. In contrast, non-parametric methods implement the prediction via " + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i) = f(\\pmb{x}_i, \\mathcal{D})" + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "content": ", depending on the whole training set. For example, KNN searches neighbors in the training set " + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "content": " with the " + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "content": " smallest distance w.r.t. " + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 44, + 42, + 301, + 146 + ], + "type": "text", + "content": ". KNN can be viewed as a specific label smoother, with a dynamic local region for every instance. [116] links KNN and Random Forest from their ways of smoothing training labels in their predictions." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "spans": [ + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": "Deep tabular methods implement " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " with a deep neural network, e.g. Most deep learning models could be decomposed into two parts, i.e., " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i) = \\pmb{W}^\\top \\phi(\\pmb{x}_i) + \\pmb{b}" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": ". Similar to the linear model, " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{W}" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{b}" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " are the components of linear classifier, with " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{W} \\in \\mathbb{R}^{d' \\times C}" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " maps the input vector " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " into the " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " dimension space, which extracts semantic embeddings for the given tabular input. " + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 44, + 147, + 301, + 240 + ], + "type": "text", + "content": " could be implemented with MLP or residual network." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 256, + 175, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 256, + 175, + 268 + ], + "spans": [ + { + "bbox": [ + 45, + 256, + 175, + 268 + ], + "type": "text", + "content": "2.2 History of Tabular Data" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 272, + 304, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 272, + 304, + 411 + ], + "spans": [ + { + "bbox": [ + 44, + 272, + 304, + 411 + ], + "type": "text", + "content": "Historically, classical machine learning tasks were predominantly formulated with tabular data, or datasets readily transformed into a tabular representation without explicitly designating them as \"tabular.\" In early literature, the term \"tabular\" typically referred to tables within relational databases [117], CSV files on the web [118], or tables in documents [119]. Relevant tasks included table extraction [120], parsing [121], understanding [122], and discovering association rules [123]. 
With the expansion of machine learning applications into other modalities such as images, texts, audio, and video, the classical vector-based data representations have come to be explicitly termed \"tabular data.\"" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 411, + 301, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 411, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 44, + 411, + 301, + 562 + ], + "type": "text", + "content": "Early statistical approaches such as linear regression, logistic regression, linear discriminant analysis, and K-Nearest Neighbors (KNN) predate artificial intelligence. Classical learning methods further expanded across various paradigms, including decision trees [124], [125], multi-layer perceptrons (MLPs), support vector machines (SVMs), and nearest centroid classifiers [5], [14]. Ensemble methods enhanced predictive performance by aggregating outputs from multiple base learners [126], [127]. More recently, gradient boosting frameworks [128], [129], such as XGBoost [130], LightGBM [131], and CatBoost [132], have become prominent due to their effectiveness and efficiency in tabular data applications and competitions [133], [134]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 562, + 303, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 562, + 303, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 562, + 303, + 748 + ], + "type": "text", + "content": "With the development of deep learning, DNNs were applied to tabular classification and regression tasks decades ago, utilizing architectures such as stacked Restricted Boltzmann Machines and denoising autoencoders [135], [136], [137]. Early representation learning efforts primarily focused on dimensionality reduction and data visualization tasks [23], [24], [25], [26], yet these models struggled to surpass traditional tree-based methods in terms of generalization. 
However, advancements in neural network architectures and representation learning strategies have recently led to promising results in related tabular domains, including click-through rate prediction [27], [28], anomaly detection [138], [29], recommendation systems [139], [30], and time series forecasting [31], [140], [32], [141]. Innovations such as convolutional layers and learnable feature embeddings have improved the ability of deep models to capture high-order" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 566, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 100 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 100 + ], + "type": "text", + "content": "attribute relationships [142], [143]. While early deep tabular methods lagged behind ensemble tree-based models, recent techniques have demonstrated competitive or superior performance [33], [34], [35], affirming deep representation learning as a promising direction for tabular data modeling." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 100, + 567, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 100, + 567, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 100, + 567, + 205 + ], + "type": "text", + "content": "While several survey papers have been published [9], [144], the field of tabular data has witnessed remarkable progress over the past two years. On one hand, the emergence of new specialized methods has introduced significant shifts in the landscape, motivating the need for our comprehensive taxonomy. On the other hand, the rise of transferable and general approaches has greatly enhanced the generality and applicability of tabular data modeling, which has been overlooked in previous works." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 222, + 523, + 233 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 222, + 523, + 233 + ], + "spans": [ + { + "bbox": [ + 309, + 222, + 523, + 233 + ], + "type": "text", + "content": "2.3 Challenges of Learning from Tabular Data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 237, + 565, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 237, + 565, + 271 + ], + "spans": [ + { + "bbox": [ + 308, + 237, + 565, + 271 + ], + "type": "text", + "content": "Different from other types of data sources, e.g., images and texts, there exist several challenges dealing with tabular datasets due to their characteristics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 272, + 566, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 566, + 388 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 566, + 388 + ], + "type": "text", + "content": "Heterogeneity of Features. Unlike continuous image data or token-based textual data, tabular datasets often contain both numerical and categorical attributes, each requiring distinct handling methods [9], [145]. Numerical features frequently exhibit varying ranges and distributions, necessitating normalization or scaling. Categorical features differ in cardinality and semantic interpretation, requiring encoding methods like one-hot vectors or embeddings. Consequently, tabular models must carefully handle these mixed data types to preserve the usability of each feature." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 388, + 566, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 566, + 515 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 566, + 515 + ], + "type": "text", + "content": "Lack of Spatial Relationships. 
Tabular data inherently lacks spatial or sequential relationships that are naturally found in other modalities [74], [50]. The order of columns has no semantic or spatial meaning, making tabular data permutation-invariant regarding features. Moreover, standard tabular machine learning assumes rows are independently and identically distributed (i.i.d.), further eliminating temporal or sequential correlations present in data such as video or time series. This absence of inherent spatial or sequential structure challenges deep learning architectures traditionally designed to exploit such dependencies." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 515, + 566, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 515, + 566, + 631 + ], + "spans": [ + { + "bbox": [ + 308, + 515, + 566, + 631 + ], + "type": "text", + "content": "Low-quality and Missing Data. Compared to image or text data, where contextual or spatial redundancies help manage missing or corrupted values, tabular data is more vulnerable to incomplete or erroneous entries [146], [147]. Missing values in tabular datasets can introduce significant biases and degrade prediction quality. Additionally, noisy or incorrect values can considerably affect model reliability. Data preprocessing steps, including data cleaning and imputation, become crucial to maintaining accuracy and robustness in tabular machine learning." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 631, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 631, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 631, + 566, + 748 + ], + "type": "text", + "content": "Importance of Feature Engineering. Effective tabular models heavily depend on the quality of their input features [45], [148]. 
Unlike image or textual data, where DNNs inherently learn feature representations from raw data, tabular methods often require domain-specific knowledge and meticulous manual feature engineering. Identifying and modeling complex, nonlinear interactions among tabular features frequently demands sophisticated transformations and expert insight, significantly impacting the predictive performance of models [149]." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 193 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 193 + ], + "type": "text", + "content": "Class Imbalance. Tabular datasets frequently exhibit imbalanced label distributions, especially in classification tasks, where certain categories are underrepresented [150], [151]. Class imbalance complicates model learning, leading to biased outcomes toward majority classes and poor performance on minority classes. Specialized methods such as oversampling, undersampling, or tailored loss functions (e.g., focal loss [152]) are required to address this imbalance effectively. Evaluation criteria like the AUC or F1-score further help assess model quality in imbalanced settings. 
Recent research highlights differences between deep and classical models in handling imbalance, emphasizing the need for careful consideration [153], [154], [155], [41]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 196, + 301, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 196, + 301, + 324 + ], + "spans": [ + { + "bbox": [ + 45, + 196, + 301, + 324 + ], + "type": "text", + "content": "Remark 2. Class imbalance has long been a known issue in the tabular domain, even before the rise of deep learning [156], and methods such as SMOTE [157], [158] can easily be extended to deep learning methods during preprocessing. However, Current deep tabular methods primarily assume that the training and testing data come from the same distribution, even in cases involving class imbalance. In addition, some class imbalance methods in visual domain can be readily extended to the tabular data learning [159], [160]. Therefore, we do not delve into class imbalance in this survey." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 331, + 301, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 331, + 301, + 563 + ], + "spans": [ + { + "bbox": [ + 44, + 331, + 301, + 563 + ], + "type": "text", + "content": "Scalability to Large Datasets. Tabular datasets can become large-scale and high-dimensional, presenting computational and generalization challenges [161]. With increasing dimensionality, the risk of overfitting increases, especially when the number of features significantly surpasses the number of samples. Consequently, efficient training algorithms, memory management strategies, and sufficient computational resources become essential. Effectively scaling tabular models to handle large datasets while maintaining generalization ability remains a challenging but critical research area [162]. Model Selection and Hyperparameter Tuning. 
Tabular models are particularly sensitive to hyperparameter settings [163], [164]. Selecting an appropriate model architecture and tuning hyperparameters, such as learning rate, layer depth, or number of trees, can be computationally expensive and time-consuming. Despite the advancement of automated machine learning (AutoML) techniques [165], [166], [167], efficiently identifying optimal configurations for deep tabular methods under practical constraints remains challenging and critical for achieving high predictive performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 563, + 301, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 563, + 301, + 679 + ], + "spans": [ + { + "bbox": [ + 44, + 563, + 301, + 679 + ], + "type": "text", + "content": "Domain-Specific Constraints. Certain application domains, such as healthcare or finance, impose additional regulatory or ethical requirements on model development [168]. For example, healthcare applications must comply with privacy standards like HIPAA [169] and provide explainability to clinicians. Financial models similarly must adhere to fairness regulations and industry standards. These constraints can restrict algorithm selection, necessitate interpretable predictions, and require additional validation, explainability, and auditability procedures [170], [171], [172]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 695, + 211, + 707 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 695, + 211, + 707 + ], + "spans": [ + { + "bbox": [ + 45, + 695, + 211, + 707 + ], + "type": "text", + "content": "2.4 Evaluation of a Tabular Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "text", + "content": "We present the evaluation of tabular methods, ranging from traditional to modern, to provide a comprehensive evaluation across different aspects. For a given model on a dataset " + }, + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "text", + "content": "we employ standard metrics that quantify the discrepancy between the predicted label " + }, + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "text", + "content": " and the true label " + }, + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 307, + 42, + 564, + 65 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 66, + 566, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 66, + 566, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 66, + 566, + 215 + ], + "type": "text", + "content": "Evaluation on A Single Task. 
For classification tasks, Accuracy (or Error Rate) is commonly employed as the primary metric. AUC and F1 scores are further used to address imbalanced label distributions, while Expected Calibration Error (ECE) [173], [174] calculates the weighted average error of the estimated probabilities. All criteria are the higher, the better, except the error rate and ECE. For regression tasks, common metrics include Mean Squared Error (MSE), Mean Absolute Error (MAE), and Root Mean Squared Error (RMSE), with MAE and RMSE sharing the scale of the original labels. Lower values denote superior performance. Additionally, the coefficient of determination " + }, + { + "bbox": [ + 307, + 66, + 566, + 215 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}^2)" + }, + { + "bbox": [ + 307, + 66, + 566, + 215 + ], + "type": "text", + "content": " is employed, with higher values indicating a better fit." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 216, + 566, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 216, + 566, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 216, + 566, + 284 + ], + "type": "text", + "content": "In tabular machine learning, the diversity of datasets makes it difficult for any single model to consistently excel across all scenarios. Therefore, evaluating models requires not only assessing their performance on individual datasets but also employing aggregated metrics that capture their overall effectiveness across multiple datasets." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "text", + "content": "Evaluation on A Set of Tasks. 
Early research predominantly relied on Average Rank (Friedman Rank) [12], [39], often used in conjunction with Critical Difference Comparisons, to evaluate model performance across multiple datasets. Models are ranked per dataset based on a chosen metric (e.g., accuracy, AUC, RMSE), and the average rank is computed across datasets. To ensure statistical robustness, hypothesis tests were employed to assess the significance of ranking differences, providing a more reliable comparative analysis. For multiple comparisons, tests such as the Wilcoxon-Holm, Fredman, and Nemiyi tests are employed [175]. To address the potential degradation of average rank by poor performance on some datasets, the Probability of Achieving the Maximum Accuracy (PAMA) [12] is defined as the fraction of datasets in which a model attains the highest accuracy. An alternative to PAMA accounts for near-optimal performance, " + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "inline_equation", + "content": "P95" + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "text", + "content": " quantifies the likelihood of a model attaining at least " + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "text", + "content": " of the maximum accuracy, which is computed as the ratio of datasets where the classifier achieves at least " + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 307, + 285, + 566, + 515 + ], + "type": "text", + "content": " of the maximum accuracy to the total number of datasets." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 516, + 566, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 516, + 566, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 516, + 566, + 677 + ], + "type": "text", + "content": "As research progressed, more diverse evaluation metrics were introduced. The Arithmetic Mean of a chosen metric provides a direct comparison across datasets, but variations in the scales of evaluation metrics across datasets can distort results. To mitigate this issue, performance metrics are often normalized before aggregation, with normalized Accuracy applied to classification tasks and normalized RMSE (nRMSE) used for regression [36], [34]. Depending on the evaluation framework, Mean Normalized Error can be used, but its dependence on normalization can hinder independent optimization. To further address these limitations, the Shifted Geometric Mean (SGM) error was introduced, which aggregates errors multiplicatively, reducing sensitivity to extreme values and ensuring more stable cross-datasets/splits comparisons [34]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "type": "text", + "content": "Beyond absolute performance, relative comparisons are also important. The Relative Improvement metric quantifies a model's performance gain over a baseline (e.g., a simple MLP), offering insight into efficiency relative to simpler alternatives [176]. 
More recently, drawing inspiration from the ELO rating system[177], [178], ELO-based evaluation has" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "type": "text", + "content": "been introduced [179], modeling model-to-model comparisons as pairwise competitions across datasets. The ELO Score iteratively adjusts rankings based on relative performance, providing a more dynamic, fine-grained assessment." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 105, + 226, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 105, + 226, + 115 + ], + "spans": [ + { + "bbox": [ + 45, + 105, + 226, + 115 + ], + "type": "text", + "content": "2.5 Tabular Benchmarks and Datasets" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 120, + 301, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 120, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 44, + 120, + 301, + 156 + ], + "type": "text", + "content": "This section introduces existing benchmarks and datasets, along with associated considerations for constructing the benchmarks and evaluation protocols." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 166, + 259, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 166, + 259, + 177 + ], + "spans": [ + { + "bbox": [ + 45, + 166, + 259, + 177 + ], + "type": "text", + "content": "2.5.1 Popular Tabular Benchmarks and Datasets" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 180, + 299, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 180, + 299, + 226 + ], + "spans": [ + { + "bbox": [ + 44, + 180, + 299, + 226 + ], + "type": "text", + "content": "We first introduce several benchmarks based on raw features constructed from various aspects. Then, we present datasets with rich semantics, following some tabular toolboxes and evaluation protocols." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 227, + 301, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 227, + 301, + 295 + ], + "spans": [ + { + "bbox": [ + 44, + 227, + 301, + 295 + ], + "type": "text", + "content": "Standard Benchmarks. Methods for tabular data have preferences depending on the dataset, and evaluating them on limited datasets can be easily influenced by randomness or other factors. 
Therefore, it's important to consider various aspects to ensure a more comprehensive and reliable benchmark evaluation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 296, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 301, + 456 + ], + "type": "text", + "content": "A comprehensive benchmark should cover a diverse set of datasets to test the model's generalization capabilities across different tasks and feature types. The benchmark should include datasets from different task types, including binary classification, multi-class classification, and regression tasks. [12] evaluates 179 classifiers across 17 families on 121 datasets, concluding that Random Forest variants were the most likely to perform best overall. [50] explores MLPs with parameterized techniques, such as ensembling and data augmentation, over 40 classification datasets. Similarly, [33] demonstrates the effectiveness of MLPs, ResNets, and transformer-based models on 11 datasets. [36] conducts experiments on 45 datasets, investigating the differences between tree-based and DNN-based methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 458, + 301, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 458, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 44, + 458, + 301, + 596 + ], + "type": "text", + "content": "The benchmark should cover datasets with varying sizes, including datasets with a large number of samples and features as well as smaller datasets. The diversity of dataset sizes helps evaluate the scalability and efficiency of different models. [39] includes 176 classification datasets and evaluate 19 methods, comprising 8 classical and 11 deep methods. In this study, the pre-trained TabPFN model [89] emerges as the top performer on average, even when limited to randomly sampled training sets of 3000 examples. 
However, limited trials for hyperparameter tuning and strict time constraints in [39] may have led to suboptimal evaluations for some deep tabular methods [180]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 596, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 302, + 748 + ], + "type": "text", + "content": "To ensure robustness and generalization, datasets from multiple domains should be included. Common domains for tabular data include healthcare, biology, finance, education, and physics. Additionally, some datasets are derived from other domains, such as image or speech data, by feature extraction. [181] evaluates attention mechanisms and contrastive learning methods across 28 tabular datasets, comparing their performance with traditional deep learning and machine learning approaches. [44], with a particular focus on DNN-based models, uses a benchmark of over 300 tabular datasets spanning a wide range of task types, sizes, and domains. A more diverse collection allows us to assess whether a tabular method can generalize across applications." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 42, + 566, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 331 + ], + "type": "text", + "content": "Semantic-Enriched Datasets. In addition, recent research has also focused on evaluating tabular data with rich semantics, such as incorporating meta information related to tasks or integrating attribute names. UniTabE [182] introduces a 7TB dataset containing 13 billion tabular examples for tabular pre-training, covering domains with investing, time series analysis, finance, economics, and with numerical, categorical, text data types. 
CM2 [79] proposes OpenTabs for crosstab pre-training, which contains an extensive collection of large-scale tables with column name semantics, including approximately 46M tabular samples. TP-BERTa [78] filters the OpenTabs for datasets with at least 10,000 samples and no more than 32 features, resulting in 101 binary classification datasets and 101 regression datasets with about 10 million samples. GTL [81] curates a collection of 384 public tabular datasets from Kaggle, which includes 176 classification and 208 regression tasks spanning a wide range of industrial domains. TabLib collects a set of 627M tables totaling 69TiB, along with 867B tokens of context [183]. TabLib was extracted from numerous file formats, including CSV, HTML, SQLite, PDF, Excel, and others, sourced from GitHub and Common Crawl. T4 (The Tremendous Tablib Trawl) [92] takes account of the inscrutable statistics and call sheets with personally identifiable information in TabLib and filters TabLib into a collection of 4M tables with 2.1B rows." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 331, + 567, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 567, + 436 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 567, + 436 + ], + "type": "text", + "content": "Among these benchmarks and datasets, the semantic-rich ones are primarily used for pre-training LLMs on tabular data, while the others are mainly employed for evaluating standard methods. Besides, some toolboxes implement methods over tabular data, including those for classical methods, as well as those for deep tabular methods [184], [185], [186], [187], [188]. To establish a comprehensive tabular benchmark, several factors need to be considered, including the range of datasets and data quality." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 438, + 567, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 438, + 567, + 555 + ], + "spans": [ + { + "bbox": [ + 309, + 438, + 567, + 555 + ], + "type": "text", + "content": "Remark 3. Recent studies have proposed alternative perspectives for tabular evaluations, such as focusing on dataset age [42], leveraging expert-level feature engineering [43], and considering dataset version [44]. Studies have also highlighted generalization in open word environments in tabular datasets [43], [109], where the distributions of training, validation, and test sets differ significantly. More discussions are in Section 9. Incorporating diverse, high-quality datasets helps build a reliable benchmark for meaningful model comparisons." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 570, + 430, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 570, + 430, + 580 + ], + "spans": [ + { + "bbox": [ + 309, + 570, + 430, + 580 + ], + "type": "text", + "content": "2.5.2 Evaluation Protocols" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 584, + 566, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 584, + 566, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 584, + 566, + 643 + ], + "type": "text", + "content": "Given the strong sensitivity of tabular methods to data and the additional randomness in deep methods, robust evaluation is essential. Furthermore, due to the high computational cost of some methods, it is equally important to ensure evaluation efficiency." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 643, + 567, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 643, + 567, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 643, + 567, + 748 + ], + "type": "text", + "content": "Model Selection. 
Model selection on the validation set involves both hyperparameter tuning and early stopping, which are essential for reliable evaluation. Due to the large number of hyperparameters in deep methods, automated methods like Optuna [189] are commonly used to explore hyperparameters through multiple trials [33], [69]. During tuning, models are evaluated on the validation split, while models can also be trained with multiple random seeds, providing more reliable evaluations. In each trial and the" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "type": "text", + "content": "final training, early stopping [190] often employed to prevent overfitting, and the epoch with the best validation performance is selected as the final model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 77, + 301, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 77, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 44, + 77, + 301, + 239 + ], + "type": "text", + "content": "Performance Evaluation. 
To assess generalization and prevent overfitting, models are typically evaluated using separate train/val/test splits, with a typical split ratio of " + }, + { + "bbox": [ + 44, + 77, + 301, + 239 + ], + "type": "inline_equation", + "content": "64\\% / 16\\% / 20\\%" + }, + { + "bbox": [ + 44, + 77, + 301, + 239 + ], + "type": "text", + "content": ". However, such fixed splits may yield inconsistent results. With the rise of deep learning, researchers have proposed more robust evaluation protocols to better reflect model capabilities [191]. Two main approaches are commonly used: (1) fixing the data split and running multiple trials with different random seeds [54], [59], [105], [69], [62], [87], [33], [58], [192], [65], [71]; and (2) using cross-validation, where new train/val/test splits are generated in each fold [63], [89], [193], [68], [34]. A hybrid strategy combining both random seeds and cross-validation is also adopted [194]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 239, + 301, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 239, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 44, + 239, + 301, + 413 + ], + "type": "text", + "content": "Recent studies show that holdout-based hyperparameter tuning can be unstable and prone to overfitting to the validation set [195], [180]. [180] found it ineffective on most TabZilla [39] datasets and instead used 5-fold cross-validation for more robust hyperparameter selection. As a result, they found the key meta-feature findings reported in [39] no longer held. This observation was also discussed in [44], which further identified meta-features that have a greater impact on model performance. For small datasets, alternative strategies have been proposed [196], [197], [198]. However, this approach significantly reduces the efficiency of hyperparameter search. 
[199] showed that simply reshuffling data splits can often improve generalization, making holdout selection competitive with cross-validation while remaining more computationally efficient." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 431, + 248, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 431, + 248, + 443 + ], + "spans": [ + { + "bbox": [ + 45, + 431, + 248, + 443 + ], + "type": "text", + "content": "3 FROM CLASSICAL TO DEEP METHOD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 447, + 300, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 447, + 300, + 483 + ], + "spans": [ + { + "bbox": [ + 44, + 447, + 300, + 483 + ], + "type": "text", + "content": "We present possible advantages of deep learning for tabular data, as well as the potential challenges of deep learning when compared with tree-based methods." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 499, + 269, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 499, + 269, + 511 + ], + "spans": [ + { + "bbox": [ + 44, + 499, + 269, + 511 + ], + "type": "text", + "content": "3.1 Advantages of deep representation learning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 515, + 301, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 515, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 44, + 515, + 301, + 538 + ], + "type": "text", + "content": "Deep tabular models offer several advantages beyond performance when compared with classical methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 539, + 301, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 539, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 44, + 539, + 301, + 653 + ], + "type": "text", + "content": "Ability to Model Complex Feature Interactions. 
DNNs are particularly adept at capturing high-order, non-linear interactions between features, which may be challenging for traditional models like linear regression or decision trees [51], [54]. By learning a hierarchical representation of features, DNNs allow low-level feature interactions to be captured in the initial layers, while higher-order interactions are identified in deeper layers. This ability to automatically learn complex relationships makes DNNs highly effective in capturing intricate dependencies within tabular data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "content": "End-to-End Learning. Unlike traditional machine learning methods, which often involve separate steps for feature engineering, preprocessing, and model tuning, DNNs can process raw features and automatically extract useful representations without complex manual transformations. This end-to-end learning approach reduces human bias and simplifies the workflow, making the process more efficient. DNNs are trained through gradient optimization, enabling" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "type": "text", + "content": "a unified, streamlined solution for complex tasks [33], [107]. Additionally, deep models support multi-task learning, allowing related tasks to benefit from shared representations, enhancing both performance and efficiency [200], [70], [49]. Integration with Other Modalities. Deep tabular methods excel in multi-modal pipelines, where tabular data is integrated with other modalities, such as images, audio, or text. 
In AI4science applications, for instance, tabular data might be combined with image data [106], [107] (e.g., in medical imaging applications) or time-series data [201], [202] (e.g., in forecasting tasks). DNNs are well-suited to model interactions between heterogeneous data types, improving the overall performance. By jointly learning from multiple data sources, DNNs enhance their ability to make more accurate and comprehensive predictions across domains." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 215, + 567, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 215, + 567, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 215, + 567, + 353 + ], + "type": "text", + "content": "Flexibility with Dynamic Environments. DNN-based methods benefit from the flexibility of gradient-based optimization, which allows efficient and iterative training. This flexibility makes DNNs adaptable to changing objectives without significant modifications, unlike tree-based models that often require specialized methods for different tasks [9]. Moreover, DNNs excel in dynamic environments, such as real-time predictions, financial analysis, and decision-making systems, where feature relationships may shift. This adaptability makes them suitable for online learning or incremental training, where new data is continuously integrated without retraining from scratch [203], [204]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 354, + 566, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 566, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 566, + 492 + ], + "type": "text", + "content": "Long-Term Knowledge Transfer and Learning. DNNs are capable of long-term learning and knowledge transfer, which allows them to retain valuable knowledge gained from training on diverse tasks [205]. 
Once trained on a broad set of tasks, DNNs can transfer this knowledge to related domains, reducing the need for complete retraining [206]. This is especially advantageous in fields like AI4science, where a model trained on one type of scientific data can be adapted to other related domains, saving both time and computational resources. This ability to transfer knowledge across tasks is a key advantage of deep learning, enabling more efficient use of data and model capabilities over time." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 502, + 556, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 502, + 556, + 513 + ], + "spans": [ + { + "bbox": [ + 309, + 502, + 556, + 513 + ], + "type": "text", + "content": "3.2 Debates between Tree-Based Methods and DNNs" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 516, + 566, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 516, + 566, + 608 + ], + "spans": [ + { + "bbox": [ + 307, + 516, + 566, + 608 + ], + "type": "text", + "content": "Although deep tabular methods have shown great potential in learning semantic representations and constructing nonlinear predictors, their initial performance often struggles to surpass that of classical tree-based ensemble methods, such as Gradient Boosted Decision Trees (GBDT). Many studies still treat GBDT approaches as strong baselines [36], [39], and in some cases, the advantages of deep tabular methods diminish as the number of evaluation datasets increases." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 609, + 566, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 609, + 566, + 631 + ], + "spans": [ + { + "bbox": [ + 308, + 609, + 566, + 631 + ], + "type": "text", + "content": "Several reasons contribute to why tree-based methods retain their advantages over DNNs in many tabular tasks:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 632, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 632, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 632, + 566, + 748 + ], + "type": "text", + "content": "Better Handling of High-Frequency Data. Tree-based methods, particularly GBDT models, are highly efficient at handling high-frequency data or dense datasets with many small variations [38]. These models build decision trees by recursively splitting the data at the most informative feature points, capturing both local and global patterns efficiently. DNNs, on the other hand, may not capture fine-grained patterns as effectively without extensive regularization or tuning [207], [208]. To address this limitation, [38] introduced frequency reduction as an inductive bias through" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 41, + 301, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 41, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 301, + 78 + ], + "type": "text", + "content": "the addition of scaling layers, while [45] demonstrated that periodic activation functions can significantly enhance neural networks' ability to learn high-frequency functions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 83, + 301, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 83, + 301, + 212 + ], + "spans": [ + { + "bbox": [ + 44, + 83, + 301, + 212 + ], + "type": "text", + "content": "Natural Handling of Mixed Data Types. Tabular data often includes a combination of numerical, categorical, and ordinal features [9], [44], [209]. Tree-based models are particularly strong when working with mixed data types, as they can handle categorical features directly without requiring one-hot encoding or embeddings. This ability to work with raw categorical data simplifies the preprocessing pipeline significantly. DNNs, however, generally require encoding techniques (e.g., one-hot encoding or learned embeddings) for categorical features, adding complexity and potentially leading to suboptimal performance [63]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 217, + 301, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 217, + 301, + 345 + ], + "spans": [ + { + "bbox": [ + 44, + 217, + 301, + 345 + ], + "type": "text", + "content": "Lower Computational Requirements for Training and Inference. For certain tasks, tree-based models tend to be more computationally efficient than DNNs [33]. GBDTs and other decision tree-based models can train relatively quickly and are less computationally intensive than deep neural networks [210], [39]. This is especially true when the dataset is not massive or when the model needs to be trained and deployed rapidly. DNNs, on the other hand, often require significant computational resources (e.g., GPUs, longer training times) to achieve comparable performance, making them less ideal in resource-constrained environments [211], [88]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 351, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 351, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 44, + 351, + 301, + 456 + ], + "type": "text", + "content": "Robustness to Noisy and Missing Data. Tree-based models are generally more robust to noisy data and missing values. When training a decision tree, missing values can be handled through optimal splitting that accommodates absent data, and trees can effectively deal with noisy or inconsistent data points [36]. DNNs, in contrast, are more sensitive to noise and often require careful preprocessing or specific techniques (e.g., data imputation or noise filtering) to avoid performance degradation with noisy or missing data [65], [89]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 463, + 302, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 463, + 302, + 635 + ], + "spans": [ + { + "bbox": [ + 44, + 463, + 302, + 635 + ], + "type": "text", + "content": "Interpretability and Transparency. Tree-based models offer a significant advantage in terms of interpretability [60], [61], [105]. The decision-making process of models like GBDT can be easily visualized in the form of decision paths, and feature importance can be directly extracted [130], [132], [131]. This transparency makes tree-based models appealing in domains where model explainability is crucial, such as in finance, healthcare, and regulated industries. Although interpretability techniques like LIME [212] and SHAP [213] exist for DNNs, tree-based models still tend to be more intuitive and easier to explain, especially in complex decision-making environments. Recent works [214], [60], [59], [193] have sought to bridge this gap by enhancing neural network interpretability through emulation of tree-based model behaviors." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 642, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 642, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 642, + 301, + 748 + ], + "type": "text", + "content": "Handling Outliers and Skewed Data. Tree-based methods are often better at handling outliers and skewed distributions in the data. When a feature exhibits extreme values or skewed distributions, decision trees are inherently less sensitive to such anomalies because they create splits based on feature ranges that naturally isolate outliers. This characteristic can make them more robust than DNNs, which may require specialized loss functions or techniques (e.g., robust scaling or outlier removal) to handle such data points [43], [109]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 41, + 524, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 41, + 524, + 53 + ], + "spans": [ + { + "bbox": [ + 309, + 41, + 524, + 53 + ], + "type": "text", + "content": "4 TAXONOMY OF SPECIALIZED METHODS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 57, + 565, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 57, + 565, + 148 + ], + "spans": [ + { + "bbox": [ + 308, + 57, + 565, + 148 + ], + "type": "text", + "content": "Similar to the evolution of deep learning, which progresses from specialized learning to transfer learning and ultimately to foundation models [244], we categorize deep tabular methods into three groups, as shown in Figure 2: specialized methods, transferable methods, and general methods. This classification reflects both the evolutionary development of deep learning techniques and the increasing generalization capabilities of these models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "spans": [ + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "text", + "content": "Specialized methods, being the earliest developed and most widely used category, will be our starting point for discussion. Tabular data consists of features (columns), samples (rows), and objectives (labels), which together define the structure and the task objectives. We emphasize detailed strategies for obtaining high-quality representations at both feature- and sample-level for the target task. 
Specifically, given the input data, according to the general learning objective in Equation 1, we consider how to transform the tabular input " + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "text", + "content": " (feature aspect), how to construct relationships between samples (sample aspect), how to design the objective " + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "inline_equation", + "content": "\\ell(\\cdot)" + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "text", + "content": " and regularize " + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "inline_equation", + "content": "\\Omega(\\cdot)" + }, + { + "bbox": [ + 308, + 148, + 566, + 287 + ], + "type": "text", + "content": " (objective aspect). In particular," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 288, + 566, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 288, + 566, + 368 + ], + "spans": [ + { + "bbox": [ + 309, + 288, + 566, + 368 + ], + "type": "text", + "content": "- Feature Aspect. We focus on how to transform the raw tabular input (in various forms) into intermediate representations. We consider two types of features: numerical and categorical. By explicitly modeling the relationships between the two features (e.g., feature importance and interactions), we are able to enhance the model's understanding of the input space." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 368, + 566, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 368, + 566, + 437 + ], + "spans": [ + { + "bbox": [ + 309, + 368, + 566, + 437 + ], + "type": "text", + "content": "- Sample Aspect. In addition to features, we explore how to retrieve and utilize neighboring samples to capture intersample dependencies, thereby improving predictions. 
In order to improve the model's ability to make predictions, we explore the relationships between a target sample and its \"extracted neighbors.\"" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 437, + 566, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 437, + 566, + 506 + ], + "spans": [ + { + "bbox": [ + 309, + 437, + 566, + 506 + ], + "type": "text", + "content": "- Objective Aspect. We examine how to modify the loss function and overall objective to introduce inductive biases. By directly guiding the learning process with the target variables, we incorporate prior knowledge or task-specific preferences into the model, thereby improving its generalizability and interpretability." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 506, + 565, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 506, + 565, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 506, + 565, + 611 + ], + "type": "text", + "content": "In specialized methods, we focus solely on learning from pure data, excluding feature semantics considered in transferable methods (in Section 6), as they leverage the capabilities of language models. Since specialized methods encompass a wide range of approaches, and feature-aspect methods are the most extensive part of them, we will first introduce sample-aspect methods and objective-aspect methods in the following subsections. In Section 5, we will provide a detailed introduction to feature-aspect methods." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 622, + 499, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 622, + 499, + 634 + ], + "spans": [ + { + "bbox": [ + 309, + 622, + 499, + 634 + ], + "type": "text", + "content": "4.1 Sample-aspect Specialized Methods" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "text", + "content": "Sample interaction methods take a retrieval-based approach, focusing on relationships between individual samples rather than features. In a tabular dataset, each sample " + }, + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "text", + "content": " represents a row with " + }, + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 307, + 636, + 565, + 706 + ], + "type": "text", + "content": " features, and the goal is to leverage relationships between a target sample and its \"extracted neighbors\" to improve predictions." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 706, + 565, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 706, + 565, + 729 + ], + "spans": [ + { + "bbox": [ + 308, + 706, + 565, + 729 + ], + "type": "text", + "content": "The general form for the sample interaction methods can be expressed as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 389, + 734, + 565, + 748 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 734, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 389, + 734, + 565, + 748 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {i} = f \\left(\\mathcal {R} \\left(\\boldsymbol {x} _ {i}, \\mathcal {D}; \\Phi\\right)\\right), \\tag {2}", + "image_path": "58235d84006ce990581e96726220630221697514d510637a9cf60a4a939d7036.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 70, + 531, + 259 + ], + "blocks": [ + { + "bbox": [ + 44, + 38, + 566, + 63 + ], + "lines": [ + { + "bbox": [ + 44, + 38, + 566, + 63 + ], + "spans": [ + { + "bbox": [ + 44, + 38, + 566, + 63 + ], + "type": "text", + "content": "Table 1: The taxonomy of representation learning for tabular data. 
The shade color in the last column denotes the subcategory, which is consistent with Figure 3." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 77, + 70, + 531, + 259 + ], + "lines": [ + { + "bbox": [ + 77, + 70, + 531, + 259 + ], + "spans": [ + { + "bbox": [ + 77, + 70, + 531, + 259 + ], + "type": "table", + "html": "
Algorithm CategoryReference
Specialized Methods§ 5Feature-aspect MethodsFeature Encoding[33], [45], [64]
Feature Selection[59], [60], [105], [61], [193]
Feature Projection[52], [33], [34], [58]
Feature Interaction[54], [62], [63], [55], [65], [49], [215]
§ 4.1Sample-aspect MethodsSample Interaction[70], [216], [217], [192], [67]
Neighbor Retrieval[218], [68], [69], [35]
§ 4.2Objective-aspect MethodsTraining Objective[67]
Training Regularization[219], [50], [66]
§ 6Transferable MethodsHomogeneous[63], [48], [70], [220], [46], [221], [222], [223], [47], [224], [225], [226], [227]
Heterogeneous[228], [229], [222], [72], [73], [64], [230], [231]
Language Model[77], [232], [182], [79], [78], [233], [234], [82], [83], [235], [236], [80], [237]
Vision Model[238], [239], [240], [74], [75], [241], [242], [76]
General Methods
TabPFN Variants[89], [91]
Semantics-based[92], [93], [94], [243]
", + "image_path": "172234d4f3ca90801d6fb35295f332435c76d1756e9f1a75e41d12052939df1d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 264, + 556, + 559 + ], + "blocks": [ + { + "bbox": [ + 50, + 264, + 556, + 559 + ], + "lines": [ + { + "bbox": [ + 50, + 264, + 556, + 559 + ], + "spans": [ + { + "bbox": [ + 50, + 264, + 556, + 559 + ], + "type": "image", + "image_path": "65ce6230b3d29ce1199fc2127cfe0a5435c735cc905b76dbc127228712d8de2d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 568, + 565, + 593 + ], + "lines": [ + { + "bbox": [ + 44, + 568, + 565, + 593 + ], + "spans": [ + { + "bbox": [ + 44, + 568, + 565, + 593 + ], + "type": "text", + "content": "Figure 3: The roadmap of deep representation learning tabular methods. We organize representative methods chronologically to show the concentration at different stages. Different colors of these methods denote the sub-categories." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "spans": [ + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": " is the set of all samples (training data) available for retrieval or learning. 
" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(\\cdot)" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": " is the sample interaction module, which retrieves or aggregates information from relevant samples in " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": " for the target sample " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": " represents the learnable parameters of " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": " is the prediction head that maps the aggregated information to the final output " + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 44, + 612, + 300, + 684 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 689, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 689, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 689, + 301, + 748 + ], + "type": "text", + "content": "Sample aspect approaches can be broadly categorized into two main strategies. 
The first approach introduces the modeling of sample relationships " + }, + { + "bbox": [ + 44, + 689, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 44, + 689, + 301, + 748 + ], + "type": "text", + "content": " during representation training, allowing the model to learn better representations by capturing inter-sample dependencies. The second ap" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 612, + 565, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 612, + 565, + 648 + ], + "spans": [ + { + "bbox": [ + 307, + 612, + 565, + 648 + ], + "type": "text", + "content": "proach is retrieval-based models, which directly predict outcomes by learning how to retrieve and utilize neighbors' relationships " + }, + { + "bbox": [ + 307, + 612, + 565, + 648 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 307, + 612, + 565, + 648 + ], + "type": "text", + "content": " when testing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 651, + 564, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 651, + 564, + 709 + ], + "spans": [ + { + "bbox": [ + 307, + 651, + 564, + 709 + ], + "type": "text", + "content": "Sample Interaction. These methods assist in representation learning by allowing the model to capture relationships between samples, which in turn helps generate a more robust representation during training. During testing, the model becomes more sensitive to each sample without interaction." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 712, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 747 + ], + "type": "text", + "content": "SAINT [70] introduces inter-sample attention beyond inter-attribute attention, which improves row classification by relating each row to others in the table. NPT [216]" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 180 + ], + "type": "text", + "content": "extends this via non-parametric Transformers, whereas Hopular [217] employs Hopfield networks, sharing conceptual alignment with SAINT [70]. Unlike nearest-neighbor classification, the distance metric is learned end-to-end. Prompt [192] posits that the feature importance in tabular data is sample-dependent. During feature extraction, it treats the information between samples as prompts. PTaRL [67] identifies two issues in the representation of tabular data samples: entanglement and localization. 
It addresses these by modeling global sample relationships through prototype generation and representation projection, helping the model produce clear and consistent decisions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 180, + 301, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 180, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 44, + 180, + 301, + 239 + ], + "type": "text", + "content": "Neighbor Retrieval. These methods construct high-quality contexts to aid prediction by retrieving valuable neighbors and designing efficient ways to utilize them based on the relationships between samples. The training data is used to assist during testing." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 239, + 302, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 302, + 447 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 302, + 447 + ], + "type": "text", + "content": "DNNR [68] argues that a key advantage of neighbor-based methods is the model's transparency, meaning that the model's decisions can be explained by inspecting its components. It enhances predictive performance by incorporating local gradient estimation and Taylor series approximation into the KNN framework. TabR [69] proposes that, compared to purely parametric (e.g., retrieval-free) models, retrieval-based models can achieve superior performance while also exhibiting several practically important properties, such as the ability for incremental learning and enhanced robustness. It encodes all candidate samples and then employs an attention-like mechanism to retrieve the samples that aid in making predictions, as explored in [218]. ModernNCA [35] revitalizes the classic tabular prediction method, Neighbourhood Component Analysis (NCA) [245], by designing and incorporating deep learning architectures and strategies. The resulting method efficiently leverages neighboring samples for prediction." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 449, + 302, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 449, + 302, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 449, + 302, + 460 + ], + "type": "text", + "content": "Remark 4. The neighborhood-based approach closely resembles" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 460, + 302, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 460, + 302, + 577 + ], + "spans": [ + { + "bbox": [ + 58, + 460, + 302, + 577 + ], + "type": "text", + "content": "bles the current in-context learning [246] mechanism. In particular, the in-context learning used in general models like TabPFN [89], [91] can aslo be considered a form of the neighborhood method. This concept of neighborhood not only helps in standard tasks, but also enhances transferable and general methods. For example, LoCalPFN [90] highlights that employing local linear regression can lead to more expressive decision boundaries, while utilizing local context allows performance to scale with the size of the training dataset." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 593, + 244, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 593, + 244, + 605 + ], + "spans": [ + { + "bbox": [ + 45, + 593, + 244, + 605 + ], + "type": "text", + "content": "4.2 Objective-aspect Specialized Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "content": "The general objective learning " + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "content": " follows the structural risk minimization as in Equation 1, where " + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "content": " is the loss function to set the training objective between the prediction and the ground truth label. " + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "inline_equation", + "content": "\\Omega(\\cdot)" + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "content": " is the regularization on the model, which directs the objective or restricts the complexity of " + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 607, + 301, + 666 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "text", + "content": "In traditional machine learning, models often rely on explicit regularization techniques on " + }, + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "text", + "content": " to ensure good generalization. Methods such as decision trees, support vector machines, and linear models typically incorporate regularization terms directly into the loss function to control model complexity and prevent overfitting. For example, in linear regression, regularization methods like L1 (Lasso) [247], L2" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 312, + 44, + 441, + 106 + ], + "blocks": [ + { + "bbox": [ + 312, + 44, + 441, + 106 + ], + "lines": [ + { + "bbox": [ + 312, + 44, + 441, + 106 + ], + "spans": [ + { + "bbox": [ + 312, + 44, + 441, + 106 + ], + "type": "image", + "image_path": "6bfc55f39ab4036bf3a443b1a972dd614f4618aff254eec32b873d00f78d2a83.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 331, + 118, + 410, + 180 + ], + "blocks": [ + { + "bbox": [ + 331, + 118, + 410, + 180 + ], + "lines": [ + { + "bbox": [ + 331, + 118, + 410, + 180 + ], + "spans": [ + { + "bbox": [ + 331, + 118, + 410, + 180 + ], + "type": "image", + "image_path": "8beeccceaf07e5ef22e0485f90f15baa5f05fc84f8c7d42c772317522657ca6d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 194, + 566, + 229 + ], + "lines": [ + { + "bbox": [ + 308, + 194, + 566, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 194, + 566, + 229 + ], + "type": "text", + "content": "Figure 4: 
Illustration of feature-aspect methods, including feature encoding, feature selection, feature projection and feature interaction." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 456, + 44, + 561, + 103 + ], + "blocks": [ + { + "bbox": [ + 456, + 44, + 561, + 103 + ], + "lines": [ + { + "bbox": [ + 456, + 44, + 561, + 103 + ], + "spans": [ + { + "bbox": [ + 456, + 44, + 561, + 103 + ], + "type": "image", + "image_path": "ba9c9bbbcbdaf4046aec7ae3197352481649a45b1f333aeeab8500a90b2de4d9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 455, + 118, + 553, + 185 + ], + "blocks": [ + { + "bbox": [ + 455, + 118, + 553, + 185 + ], + "lines": [ + { + "bbox": [ + 455, + 118, + 553, + 185 + ], + "spans": [ + { + "bbox": [ + 455, + 118, + 553, + 185 + ], + "type": "image", + "image_path": "f88240d6a6ad7bfcf76cb01cc12c3e87300a15292b268038c8a926ab317be95e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 241, + 565, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 565, + 277 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 565, + 277 + ], + "type": "text", + "content": "(Ridge) [248], or Elastic-Nets [249] penalize large coefficients, effectively controlling the complexity of the model and helping to maintain a balance between bias and variance." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "text", + "content": "Objective-aspect methods in deep learning are an extension of these traditional regularization techniques, where inductive bias is introduced by adjusting the loss function " + }, + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "text", + "content": " or adding regularizers " + }, + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 308, + 278, + 566, + 348 + ], + "type": "text", + "content": ". In the training process, the goal is to leverage regularization on the model to improve predictions." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "spans": [ + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "text", + "content": "Remark 5. Pre-train methods such as homogeneous transferable tabular methods in Section 6 also change the loss function " + }, + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "text", + "content": " or the regularization " + }, + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 309, + 361, + 566, + 407 + ], + "type": "text", + "content": " to help pre-training. We will discuss these methods later." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 430, + 565, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 430, + 565, + 489 + ], + "spans": [ + { + "bbox": [ + 308, + 430, + 565, + 489 + ], + "type": "text", + "content": "Objective-aspect approaches can be broadly categorized into two main strategies. The first approach involves training objectives, which enhance the model with a specialized ability. The second approach introduces a regularizer, allowing the model to learn strong generalized representations." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 490, + 565, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 490, + 565, + 560 + ], + "spans": [ + { + "bbox": [ + 308, + 490, + 565, + 560 + ], + "type": "text", + "content": "Training Objective. For training objectives, PTaRL [67] constructs prototype-based projection space and learns the disentangled representation around global prototypes. PTaRL uses a diversification constraint for representation calibration and introduces a matrix orthogonalization constraint to ensure the independence of global prototypes." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 561, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 561, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 561, + 566, + 746 + ], + "type": "text", + "content": "Training Regularization. For training regularization, RLNs [219] overcome the challenge of an intractable number of hyperparameters during training by introducing an efficient tuning scheme, which minimizes a new \"Counterfactual Loss.\" In RLNs, the regularization coefficients are optimized together with learning the network weight parameters. RLNs produce extremely sparse networks, thus providing more interpretable models and revealing the importance that the network assigns to different inputs. 
[50] introduces \"cocktails,\" dataset-specific combinations of 13 regularization techniques, showing that even simple neural networks can outperform tree-based architectures when optimized with these methods. TANGOS [66] introduces a regularization-based improvement. It regularizes neuron attributions to encourage neurons to specialize and become orthogonal to one another." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 41, + 277, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 41, + 277, + 53 + ], + "spans": [ + { + "bbox": [ + 45, + 41, + 277, + 53 + ], + "type": "text", + "content": "5 FEATURE-ASPECT SPECIALIZED METHODS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 57, + 301, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 57, + 301, + 149 + ], + "spans": [ + { + "bbox": [ + 44, + 57, + 301, + 149 + ], + "type": "text", + "content": "Tabular data is characterized by a diverse set of features, including both categorical and numerical variables. The complexity of tabular data arises from the variety of feature types, their interrelationships, and the high dimensionality often present. 
Traditional methods often rely on manual feature engineering, using techniques such as encoding categorical variables and selecting relevant features to improve model performance and reduce overfitting." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 150, + 301, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 150, + 301, + 266 + ], + "spans": [ + { + "bbox": [ + 44, + 150, + 301, + 266 + ], + "type": "text", + "content": "As deep learning has evolved, these traditional techniques have been integrated and expanded upon. Deep tabular models are capable of automatically learning complex feature representations, reducing the need for explicit feature engineering. Feature-aspect methods, such as feature encoding, selection, projection, and interaction, are essential for transforming raw tabular inputs into more informative intermediate forms. These methods help improve a model's ability to capture intricate relationships between features, thereby enhancing its generalization capabilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 281, + 151, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 281, + 151, + 293 + ], + "spans": [ + { + "bbox": [ + 45, + 281, + 151, + 293 + ], + "type": "text", + "content": "5.1 Feature Encoding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 295, + 301, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 295, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 44, + 295, + 301, + 354 + ], + "type": "text", + "content": "Various encoding strategies have been explored for both categorical and numerical features in tabular data. Additionally, with the advancement of the attention mechanism, feature tokenization, similar to word embeddings in natural language processing, transforms all features into embeddings." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 354, + 301, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 354, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 44, + 354, + 301, + 422 + ], + "type": "text", + "content": "Categorical Encoding. Categorical variables represent types of data which may be divided into groups. Examples of categorical variables are race, sex, age group, and educational level [250]. The categorical features are usually transformed in an index (integer). The two most popular techniques are an Ordinal Encoding and a One-Hot Encoding." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 423, + 301, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 423, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 44, + 423, + 301, + 562 + ], + "type": "text", + "content": "Ordinal Encoding assigns each unique category a distinct integer value. This approach is useful when the categorical variable has an inherent order, such as \"low,\" \"medium,\" and \"high.\" The main advantage of Ordinal Encoding is its simplicity and efficiency, as it transforms the categorical variable into a single numeric column. However, it assumes that there is an ordinal relationship between the categories, which may not always be the case. For instance, if the categorical variable represents \"color\" with categories such as \"red,\" \"blue,\" and \"green,\" applying Ordinal Encoding would introduce an artificial order that does not reflect any meaningful ranking." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "text", + "content": "On the other hand, One-Hot Encoding creates a new binary column for each unique category in the original categorical variable. 
For example, for a variable \"color\" with three categories (red, blue, and green), One-Hot Encoding would generate three binary columns: \"is_red,\" \"is_blue,\" and \"is_green,\" encoding red as " + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "inline_equation", + "content": "(1,0,0)" + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "text", + "content": ", blue as " + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "inline_equation", + "content": "(0,1,0)" + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "text", + "content": " and green as " + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "inline_equation", + "content": "(0,0,1)" + }, + { + "bbox": [ + 44, + 562, + 301, + 723 + ], + "type": "text", + "content": ". Each column indicates the presence or absence of that particular category. One-Hot Encoding is useful for nominal categorical variables, where no order exists between the categories. While One-Hot Encoding avoids the assumption of ordinal relationships, it can lead to a high-dimensional feature space if the categorical variable has many unique values, which may result in increased computational costs and potential issues with overfitting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "type": "text", + "content": "In some cases, more advanced encoding techniques are used to address the limitations of these basic approaches." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "type": "text", + "content": "For example, Target Encoding assigns each category a value based on the mean of the target variable for that category. This method can be useful when there is a strong relationship between the categorical features and the target. In Leave-one-out embedding, every category is replaced with the mean of the target variable of that category, which excludes the current row to avoid overfitting." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 123, + 566, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 123, + 566, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 123, + 566, + 308 + ], + "type": "text", + "content": "Numerical Encoding. For encoding, MLP-PLR [45] introduces two numerical encoding methods: Piecewise Linear Encoding (PLE) and Periodic Activation Functions. These encoding methods can be integrated with other differentiable layers (e.g., Linear, ReLU) to enhance performance. PLE produces alternative initial representations for the original scalar values and is based on feature binning. Periodic Activation Functions take into account the fact that the embedding framework where all features are computed independently of each other forbids mixing features during the embedding process and train the pre-activation coefficients instead of keeping them fixed. [38] utilizes tools from spectral analysis, showing that functions described by tabular datasets often have high irregularity, and can be smoothed by transformations such as scaling and ranking to improve performance. They propose \"frequency reduction\" as an inductive bias during training." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 308, + 566, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 566, + 378 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 566, + 378 + ], + "type": "text", + "content": "Feature Tokenization. Feature tokenizer performs a similar role to the feature extractor in traditional models. It transforms the input features to embeddings [62], [33]. Since the feature representations of features are very sparse and high-dimensional, a common way is to represent them into low-dimensional spaces (e.g., word embeddings)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 378, + 566, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 378, + 566, + 401 + ], + "spans": [ + { + "bbox": [ + 308, + 378, + 566, + 401 + ], + "type": "text", + "content": "The general form for feature tokenization can be expressed as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 375, + 402, + 564, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 402, + 564, + 416 + ], + "spans": [ + { + "bbox": [ + 375, + 402, + 564, + 416 + ], + "type": "interline_equation", + "content": "\\boldsymbol {T} _ {i, j} = \\boldsymbol {b} _ {j} + \\mathcal {T} \\left(x _ {i, j}; \\Psi\\right) \\in \\mathbb {R} ^ {t}, \\tag {3}", + "image_path": "d744b1c13f37ff56106565f2e4ea019e84225de34f2718e162eac86475307dcf.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\cdot)" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " is the feature tokenizer module, which 
transforms the input feature vector " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " to a token embedding " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "T_{i,j}\\in \\mathbb{R}^t" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " . " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " is the dimension of token embedding. " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\pmb{b}_{j}" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " -th feature bias. " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " can be implemented with different forms. 
" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "text", + "content": " represents the learnable parameters of " + }, + { + "bbox": [ + 308, + 422, + 566, + 480 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": "In AutoInt [62], both the categorical and numerical features are embedded into low-dimensional spaces, which reduces the dimension of the input features and meanwhile allows different types of features to interact with each other. The embeddings of categorical features are computed by multiplying the embedding matrix with the multi-hot vector, while a corresponding embedding vector represents numerical features. TabTransformer [63] embed each categorical feature into a parametric embedding of dimension " + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": " using Column embedding. An embedding vector is assigned to each feature, and a set of embeddings is constructed for all categorical features. Unlike TabTransformer, SAINT [70] proposes projecting numerical features into a " + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": "-dimensional space before passing their embedding through the transformer encoder. FT-Transformer [33] adapts the Transformer architecture for tabular data, where all features are transformed to embeddings and applies a stack of Transformer layers to the embeddings. 
Specifically, the numerical tokenizer is implemented as the element-wise multiplication " + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "inline_equation", + "content": "\\boldsymbol{T}_i^{\\mathrm{num}} = \\boldsymbol{b}_i^{\\mathrm{num}} + x_i^{\\mathrm{num}} \\cdot \\boldsymbol{W}_i^{\\mathrm{num}}" + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": ", and the categorical tokenizer is implemented as the lookup table " + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "inline_equation", + "content": "\\boldsymbol{T}_i^{\\mathrm{cat}} = \\boldsymbol{b}_i^{\\mathrm{cat}} + \\boldsymbol{e}_i^T \\boldsymbol{W}_i^{\\mathrm{cat}}" + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "inline_equation", + "content": "\\boldsymbol{e}_i^T" + }, + { + "bbox": [ + 307, + 481, + 566, + 748 + ], + "type": "text", + "content": " is a one-hot vector for the corresponding categorical feature. Other transformer-based" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 41, + 301, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 41, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 301, + 65 + ], + "type": "text", + "content": "methods, like [65], [72], [230], [215], use the same feature tokenizer as FT-Transformer." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 79, + 151, + 91 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 79, + 151, + 91 + ], + "spans": [ + { + "bbox": [ + 45, + 79, + 151, + 91 + ], + "type": "text", + "content": "5.2 Feature Selection" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 93, + 301, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 93, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 44, + 93, + 301, + 163 + ], + "type": "text", + "content": "The high dimensionality of tabular data often causes overfitting, where the model focuses on irrelevant features and neglects the important ones. Feature selection reduces the number of features, retaining only the most valuable information. This helps prevent overfitting, improves generalization, and reduces computational complexity." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 164, + 301, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 164, + 301, + 266 + ], + "spans": [ + { + "bbox": [ + 44, + 164, + 301, + 266 + ], + "type": "text", + "content": "Traditional tree-based models facilitate automatic feature selection by evaluating the impact of each feature on the target during the construction process. Decision trees utilize metrics such as information gain or the Gini index for feature selection, while ensemble methods like random forests determine feature importance by assessing each feature's contribution [251], [252], [253]. Recently, modern deep learning methods for tabular data often mimic trees' structures for feature selection." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 267, + 301, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 267, + 301, + 393 + ], + "spans": [ + { + "bbox": [ + 44, + 267, + 301, + 393 + ], + "type": "text", + "content": "GrowNet [59] and NODE [60] primarily mimic ensemble techniques. Inspired by GBDT, GrowNet designs a framework for building DNNs with multiple weak learners, where each learner's input consists of the original features plus the penultimate layer output from the previous learner. NODE uses a differentiable Oblivious Decision Tree as the base model, applying Bagging within each layer and Stacking across layers in a multi-layered structure. To make GAM [254] scalable and effective, NODE-GAM [61] modifies NODE to be a GAM, allowing GAM to learn quick, nonlinear jumps that better match patterns in real data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 394, + 301, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 394, + 301, + 534 + ], + "spans": [ + { + "bbox": [ + 44, + 394, + 301, + 534 + ], + "type": "text", + "content": "TabNet [105] and GRANDE [193] focus more on how tree models handle features. 
TabNet not only retains the representation learning capabilities of DNNs through self-supervised learning, but also incorporates the interpretability of tree models and the benefits of sparse feature selection, with a model structure designed for both feature selection and computation. GRANDE argues that the hard splits used by tree models are a key advantage over deep models, and thus proposes a method for learning hard, axis-aligned tree ensembles using gradient descent. GRANDE combines the beneficial inductive bias of axis-aligned splits with the flexibility provided by gradient descent optimization." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 547, + 154, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 547, + 154, + 559 + ], + "spans": [ + { + "bbox": [ + 45, + 547, + 154, + 559 + ], + "type": "text", + "content": "5.3 Feature Projection" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 562, + 300, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 562, + 300, + 643 + ], + "spans": [ + { + "bbox": [ + 44, + 562, + 300, + 643 + ], + "type": "text", + "content": "Feature projection methods aim to project the raw data into a middle form, enhancing the representation ability for later architectures. Feature projection methods can be broadly categorized into two main approaches: MLP variants and special designed architectures. These approaches aim to enhance the model's ability to represent complex features for underlying feature structures." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "content": "MLP Variants. 
For model architecture, RTDL [33] investigates both ResNet-like and Transformer-based architectures tailored for tabular data, proposing simple yet effective adaptations of these widely-used deep models. In particular, the MLP architecture is constructed by stacking multiple blocks consisting of Linear layers, ReLU activations, and Dropout, which transform the raw tabular features into a fixed-dimensional hidden representation. A final linear layer is then used as the classification head. The paper highlights" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "type": "text", + "content": "an important insight: with proper hyperparameter tuning, even simple architectures like MLP and ResNet can achieve competitive performance on tabular benchmarks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 77, + 566, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 77, + 566, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 77, + 566, + 249 + ], + "type": "text", + "content": "Another contemporaneous work [50] enhances the MLP architecture by equipping it with a comprehensive suite of modern regularization techniques. Instead of introducing architectural innovations, this study focuses on systematically exploring combinations of 13 different regularization methods to identify an effective \"regularization cocktail\" for plain MLPs. The results demonstrate two key findings: (i) a well-regularized vanilla MLP can significantly outperform many recent, specialized neural architectures designed for tabular data; and (ii) such MLPs can even surpass strong traditional machine learning models like XGBoost across a range of benchmarks. 
For a more comprehensive strategy, RealMLP [34] explores multiple aspects including preprocessing, hyperparameters, architecture, regularization, and initialization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 250, + 567, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 250, + 567, + 389 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 567, + 389 + ], + "type": "text", + "content": "Special Designed Architectures. For units, motivated by the observation that normalization techniques are prone to disturbances during training, SNN [52] proposes the Scaled Exponential Linear Unit (SELU) to improve deep models for tabular data. NAMs [255] uses exp-centered (ExU) hidden units to improve the learnability for fitting jumpy functions. BiSHop [58] uses a dual-component approach, sequentially processing data both column-wise and row-wise through two interconnected directional learning modules. They use layers of generalized sparse modern Hopfield layers, a sparse extension of the modern Hopfield model with learnable sparsity." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 403, + 421, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 403, + 421, + 414 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 421, + 414 + ], + "type": "text", + "content": "5.4 Feature Interaction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "spans": [ + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "text", + "content": "Feature interaction methods aim to model relationships among features to enhance the representation power of deep learning models on tabular data. 
In tabular datasets, each sample " + }, + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "text", + "content": " is described by " + }, + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 308, + 416, + 565, + 486 + ], + "type": "text", + "content": " features, and the goal is to transform these raw features into enriched representations that improve predictive performance." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 487, + 565, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 487, + 565, + 509 + ], + "spans": [ + { + "bbox": [ + 308, + 487, + 565, + 509 + ], + "type": "text", + "content": "The general form for feature interaction methods can be expressed as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 394, + 509, + 565, + 522 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 509, + 565, + 522 + ], + "spans": [ + { + "bbox": [ + 394, + 509, + 565, + 522 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {i} = f \\left(\\mathcal {H} \\left(\\boldsymbol {x} _ {i}; \\Theta\\right)\\right), \\tag {4}", + "image_path": "e77a8beab13b7e0d0e9ecbd709d979618031cd2624a214a28ac3e617bc4ed51b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "spans": [ + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": " is the input feature vector for a single instance, " + }, + { + "bbox": [ + 308, + 527, + 
564, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\cdot)" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": " is the feature interaction module, which transforms the input " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": " by capturing feature dependencies or generating higher-order feature interactions. " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": " represents the learnable parameters of " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": " is the prediction head that maps the transformed representation to the final output " + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 308, + 527, + 564, + 597 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 597, + 564, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 597, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 308, + 597, + 564, + 665 + ], + "type": "text", + "content": "Feature interaction methods can be broadly categorized into two main approaches: the design of automatic feature interaction modules and the mining of implicit feature relationships. 
These approaches aim to enhance the model's ability to learn complex feature interactions and underlying feature structures within tabular data." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 666, + 564, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 666, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 308, + 666, + 564, + 723 + ], + "type": "text", + "content": "Automatic Feature Interaction Modules. These methods do not assume specific feature types within the tabular dataset. Instead, they focus on improving the feature interaction process, enabling the model to learn complex, high-order feature relationships autonomously." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 723, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 565, + 746 + ], + "type": "text", + "content": "DCNv2 [54] improves the learning of the model's feature interaction by improving the \"Cross Network\" structure. It" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 204 + ], + "type": "text", + "content": "employs low-rank methods to approximate feature crosses in subspaces and then integrates these subspaces using a gating mechanism. AutoInt [62] maps the original sparse high-dimensional feature vectors into a low-dimensional space and models high-order feature interactions by stacking interaction layers with a multi-head attention mechanism. Unlike AutoInt, the TabTransformer[63] only maps categorical features into contextual embeddings and feeds them into a Transformer model, while numerical continuous features are directly concatenated with the interacted contextual embeddings. When tabular data contains only numerical features, TabTransformer behaves in an MLP-like manner. Conversely, when the data contains only categorical features, TabTransformer operates similarly to AutoInt." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 204, + 301, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 204, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 44, + 204, + 301, + 262 + ], + "type": "text", + "content": "Implicit Feature Relationships. Methods in this category typically assume that features in tabular data can be abstracted into implicit types and that it is necessary to design a suitable feature learning process to adapt to the characteristics of different types of features." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 262, + 301, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 482 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 482 + ], + "type": "text", + "content": "DANets [55] propose the existence of underlying feature groups in tabular data, where features within each group are correlated. They learn to group input features and perform further feature abstraction. SwitchTab [49] introduces the idea of extracting sample-specific \"Salient Features\" and sample-shared \"Mutual Information\" in tabular features. It leverages self-supervised learning to assist in learning feature representations. ExcelFormer [65] argues that while DNN assigns weights to each feature, it does not actively exclude irrelevant features. To address this, it introduces Semi-Permeable Attention for feature interaction, which allows features with lower information content to access information from more informative features while preventing highly informative features from being influenced by less relevant ones. AMFormer [215] proposes the hypothesis that arithmetic feature interactions are crucial for deep tabular models. Based on the Transformer architecture, it introduces components designed to extract both additive and multiplicative interaction information." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 498, + 299, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 498, + 299, + 510 + ], + "spans": [ + { + "bbox": [ + 45, + 498, + 299, + 510 + ], + "type": "text", + "content": "6 FROM SPECIALIZED TO TRANSFERABLE MODEL" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 513, + 301, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 513, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 44, + 513, + 301, + 582 + ], + "type": "text", + "content": "Instead of training a tabular model from scratch, learning based on a Pre-Trained Model (PTM) may increase the learning efficacy and reduce the resource and data requirement. For example, in a house prices prediction task, training a regressor in a certain area may benefit from a well-trained predictor from its neighborhood." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": "Learning by reusing the PTM usually contains two stages. The first is the pre-training of a tabular model, from one or more upstream tasks. Given the PTM and a downstream task, an adaptation strategy is needed to transform the PTM to the target task or facilitate the learning of the target model. 
Formally, a well-trained model " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " is often available and can be leveraged to facilitate the training of " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " is pre-trained on a dataset " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "\\mathcal{D}' = \\{(x_j', y_j')\\}_{j=1}^{N'}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " with instances " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "x_j' \\in \\mathbb{R}^{d'}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " and labels " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "y_j' \\in [C']" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": ". 
To reuse expert knowledge in " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": ", an adaptation strategy is applied: " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "f_{\\theta} = \\text{Adapt}(f_{\\theta_0} \\mid \\mathcal{D}, g_{\\Theta})" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "inline_equation", + "content": "\\theta_0" + }, + { + "bbox": [ + 44, + 582, + 302, + 748 + ], + "type": "text", + "content": " is the initialization of the model. The notation could also be extended to cases with more than one PTM. The main challenge to reuse one or more PTMs is to bridge the gap between the PTM and the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "target tabular model [256]. We categorize PTMs into three kinds based on the source of PTM " + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": "Homogeneous Transferable Tabular Model. 
First, the PTM may come from the same form of task (with " + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "inline_equation", + "content": "d' = d" + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "inline_equation", + "content": "C' = C" + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": ", but with different distributions " + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "inline_equation", + "content": "\\operatorname{Pr}(\\mathcal{D}') \\neq \\operatorname{Pr}(\\mathcal{D})" + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": " or model families " + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "inline_equation", + "content": "g \\neq f" + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": "). For example, those pre-trained from other domains [71], or those unlabeled instances [48], [70]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "spans": [ + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": "Heterogeneous Transferable Tabular Model. In addition, we consider a PTM pre-trained from a slightly different task with " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": ". 
In addition to the previous difference, the PTM " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": " may differ from " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": " in feature dimension " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "(d' \\neq d)" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": " or target class set " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "(C' \\neq C)" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": ", so the adaptation method " + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "inline_equation", + "content": "\\mathbf{Adapt}(\\cdot)" + }, + { + "bbox": [ + 307, + 124, + 565, + 193 + ], + "type": "text", + "content": " must handle such heterogeneity [64], [230]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 193, + 566, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 193, + 566, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 193, + 566, + 297 + ], + "type": "text", + "content": "Cross-Modal Transferable Tabular Model. Moreover, the pre-trained model could also be constructed from another modality, such as vision and language domains. The cross-modality PTM is hard to be applied to the tabular prediction task in most cases, so auxiliary information from the tabular task like the semantic meaning of attributes (i.e., the attribute names) are usually assumed to be available in this case, where PTM like large language models may provide the latent semantic meanings as external knowledge [77], [73]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 297, + 566, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 566, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 566, + 552 + ], + "type": "text", + "content": "The main limitation of the transferable tabular model is the assumption that the data distribution of the well-trained model should be similar to the distribution of the target model. For example in the previous house price prediction task, if the PTM is pre-trained in an area distance from the target area and targets diverse problems, it is hard to utilize the PTM in the target task [222]. Since different tabular tasks may vary in their distribution, feature, or classes, the general assumption is their exist a common \"dimension\" between the PTM and the target task. Only the distribution changes under the shared dimension and classes, or there exists an overlap between the feature or class spaces [230]. For example, in real-world applications such as healthcare, there are numerous medical diagnostic tables. These tables usually have some features in common such as blood type and blood pressure. For rare diseases with limited data, knowledge transfer from other diagnostic tables with overlapping features becomes beneficial [228]. When the feature/label semantics are available, two different tasks may be linked through the semantic space, and textual PTMs can be used to map the tabular instance to this space or facilitate the prediction in this space [80]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 552, + 566, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 552, + 566, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 552, + 566, + 668 + ], + "type": "text", + "content": "Pros and Cons of transferable Models. Learning with a well-trained tabular model has several advantages based on the knowledge encoded in the PTM. 
First, the training efficiency of the target model is improved and the model may converge fast, as the PTM may provide better initialization weights or optimization paths. Then, the target model will reduce the requirement on the data size, i.e., learning with a few-shot dataset. Training based on a PTM also reduces the number of learnable parameters, leading to parameter-efficient tuning and reducing computational resources." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 684, + 526, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 684, + 526, + 696 + ], + "spans": [ + { + "bbox": [ + 308, + 684, + 526, + 696 + ], + "type": "text", + "content": "6.1 Homogeneous Transferable Tabular Model" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "type": "text", + "content": "Adapting a tabular model from another domain with different distributions is investigated in the field of unsupervised domain adaptation before the era of deep learning. One representative method is the biased regularization, which" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 49, + 294, + 156 + ], + "blocks": [ + { + "bbox": [ + 51, + 49, + 294, + 156 + ], + "lines": [ + { + "bbox": [ + 51, + 49, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 51, + 49, + 294, + 156 + ], + "type": "image", + "image_path": "66966f1b48254c69ec49709de32610ec85ebc14caea1a2cea39c1e73a8debf60.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 166, + 303, + 226 + ], + "lines": [ + { + "bbox": [ + 44, + 166, + 303, + 226 + ], + "spans": [ + { + "bbox": [ + 44, + 166, + 303, + 226 + ], + "type": "text", + "content": "Figure 5: Illustration of homogeneous transferable tabular methods. The pre-trained model could be constructed from supervised learning or self-supervised learning, which includes masked language model, contrastive pre-training, and hybrid methods." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 230, + 301, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 230, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 44, + 230, + 301, + 255 + ], + "type": "text", + "content": "minimizes the difference between the weights of the PTM and the target model, i.e.," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 258, + 301, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 258, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 45, + 258, + 301, + 285 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {W}} \\ell (\\boldsymbol {W}) + \\| \\boldsymbol {W} - \\boldsymbol {W} ^ {\\prime} \\| _ {F} ^ {2} = \\min _ {\\Delta \\boldsymbol {W}} \\ell \\left(\\Delta \\boldsymbol {W} + \\boldsymbol {W} ^ {\\prime}\\right) + \\| \\Delta \\boldsymbol {W} \\| _ {F} ^ {2}. \\tag {5}", + "image_path": "12771ab1437687a7b7f213e1ce16fc681b1d4d6741ca4a2572a2e60c0e8323f5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "spans": [ + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "\\ell(W)" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": " is the loss function on the current weights " + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "W'" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": ", and the regularize constraint the distance between the target model " + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": " and the PTM weights " + }, + { + "bbox": [ + 44, + 
285, + 301, + 400 + ], + "type": "inline_equation", + "content": "W'" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": ". We can reformulate the learning objective as learning the weights residual " + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "\\Delta W" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": ". Biased regularization can be extended to the case where " + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 44, + 285, + 301, + 400 + ], + "type": "text", + "content": " are deep neural networks such as MLP, but it fails when the target model has a different architecture with the PTM. In this case, instead of matching two models through their weights, matching their predictions also helps. For example, twice learning [253] and knowledge distillation [257]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 401, + 301, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 401, + 301, + 470 + ], + "spans": [ + { + "bbox": [ + 44, + 401, + 301, + 470 + ], + "type": "text", + "content": "Benefiting from the strong capacity of deep neural networks, some recent studies focus on pre-training a tabular model from unsupervised instances, and then adapting the model via fine-tuning the PTM on the target (even few-shot) labeled examples. This strategy could be applied in standard supervised learning or semi-supervised learning." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 470, + 301, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 470, + 301, + 574 + ], + "spans": [ + { + "bbox": [ + 44, + 470, + 301, + 574 + ], + "type": "text", + "content": "Supervised Pre-training Objectives. A straightforward way to incorporate the target variable into the pre-training is by using the input corruption as an augmentation for the standard supervised learning objective. [71] identifies practices to pre-train tabular deep learning models that can be universally applied to different datasets and architectures. They show that using the object target labels during the pre-training stage benefits the downstream performance and advocates several target-aware pre-training objectives." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 574, + 301, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 574, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 44, + 574, + 301, + 620 + ], + "type": "text", + "content": "Self-Supervised Pre-training Objectives. The self-supervised pre-training objectives can be mainly categorized into three categories, including the masked language model, contrastive pre-training, and hybrid methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 620, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 620, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 620, + 301, + 746 + ], + "type": "text", + "content": "Masked Language Model (MLM). MLM is the unsupervised pre-training objective, where a random subset of features is masked for each sample, and the masked values are predicted in a multi-target classification manner [63]. VIME [48] estimates mask vectors from corrupted tabular data and reconstructs feature vectors for self-supervised learning. 
They use the trained encoder to generate multiple augmented samples for each data point by masking each point using several different masks and then imputing the corrupted values for each masked data point. SubTab [46] finds that reconstructing the data from the subset of its features rather" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 42, + 566, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 135 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 135 + ], + "type": "text", + "content": "than its corrupted version in an autoencoder setting can better capture its underlying latent representation. SEFS [221] reconstructs the original input based on a randomly selected subset of input features, and simultaneously estimates the gate vector that defines which features are selected or not. MET [223] uses a concatenation of representations for all features instead of averaging and uses adversarial reconstruction loss in addition to the standard loss." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "text", + "content": "Contrastive Pre-training. Contrastive pre-training uses data augmentations to generate positive pairs or two different augmented views of a given example, and the loss function encourages a feature extractor to map positive pairs to similar features. The key factor in contrastive learning is to generate positive and negative versions of a given instance " + }, + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "text", + "content": ". 
[70] utilizes CutMix [258] in the input space and Mixup [259] in the embedding space to obtain positive pairs, where other instances " + }, + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "inline_equation", + "content": "x_{j \\neq i}" + }, + { + "bbox": [ + 307, + 152, + 567, + 430 + ], + "type": "text", + "content": " are treated as negative ones. SCARF [47] generates a view for a given input by selecting a random subset of its features and replacing them with random draws from their respective empirical marginal distributions. STab [224] relies on two (or multiple) weight-sharing neural networks with different regularizations applied to a single input. By exploiting the stop-gradient operation technique, STab can model invariance with respect to more complicated regularizations while it will not collapse to an undesired trivial solution. DoRA [226] incorporates domain knowledge, training by intra-sample pretext task and inter-sample contrastive learning to learn contextualized representations. DACL+ [220], to overcome the reliance on a particular domain, uses Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 446, + 567, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 446, + 567, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 446, + 567, + 748 + ], + "type": "text", + "content": "Hybrid Methods. [222] explores several pre-training strategies including both supervised and unsupervised ones. It considers MLM as the unsupervised pre-training objective, and sets multi-label classification as the supervised pre-training objective. By fine-tuning the PTM with several choices, including those with frozen feature extractor or not, the paper observes that supervised pre-training leads to more transferable features in the tabular domain. 
LFR [227] conducts pretraining by learning to simultaneously reconstruct multiple randomly generated projection functions. It considers diverse data types to show the wide-ranging applicability of learning from randomness, including tabular, vision, and language. ReConTab [225] utilizes both self-supervised learning and semi-supervised learning. It uses regularization techniques for raw feature selection and leverages contrastive learning with labels to distill the most pertinent information for downstream tasks. [71] focuses on the setup with fully labeled tabular datasets to understand if pretraining helps tabular deep learning in a fully supervised setting and compares pretraining methods to the strong supervised baselines. They show that using the object target labels during the pertaining stage is beneficial for the downstream performance and advocate several target-aware pretraining objectives. [256] provides a systematic review and summarizes the recent progress and challenges of self-supervised learning for non-sequential tabular data." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 49, + 298, + 162 + ], + "blocks": [ + { + "bbox": [ + 53, + 49, + 298, + 162 + ], + "lines": [ + { + "bbox": [ + 53, + 49, + 298, + 162 + ], + "spans": [ + { + "bbox": [ + 53, + 49, + 298, + 162 + ], + "type": "image", + "image_path": "0143006dcbf94506f23c4872c5ddb3dfd3a832c6465dc6e88fc3fcdfe7b31008.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 171, + 302, + 231 + ], + "lines": [ + { + "bbox": [ + 44, + 171, + 302, + 231 + ], + "spans": [ + { + "bbox": [ + 44, + 171, + 302, + 231 + ], + "type": "text", + "content": "Figure 6: Illustration of heterogeneous transferable tabular methods. During pre-training on one or multiple datasets, most of the parameters in the PTM are trained. For downstream tasks, only a small subset of parameters is fine-tuned while the rest remain fixed." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 239, + 265, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 239, + 265, + 251 + ], + "spans": [ + { + "bbox": [ + 44, + 239, + 265, + 251 + ], + "type": "text", + "content": "6.2 Heterogeneous Transferable Tabular Model" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "spans": [ + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "text", + "content": "The main intuition lies in the mapping " + }, + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 44, + 258, + 301, + 316 + ], + "type": "text", + "content": " work in a similar fashion, i.e., predicting the labels with similar mechanisms. Therefore, the main idea to transfer knowledge is to match the target model with the well-trained one, over the weight space or the prediction space." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "spans": [ + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "content": "Early methods mainly focus on the feature-level heterogeneity between " + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "content": ". 
One main assumption is that there exists a shared set of features between the pre-trained task " + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{D}'" + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "content": " and the target task " + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 316, + 301, + 525 + ], + "type": "text", + "content": ", then we may directly copy the weights corresponding to the shared features from the PTM. Some methods extend bias regularization to deal with heterogeneous feature spaces by padding the weights with zero. OPID [260] is a one-pass learning approach, which only needs to scan each instance once and to deal with evolving streams. In the pre-training stage, OPID compresses important information of vanished features into functions of survived features, and in the adaptation stage, it is expanded to include the augmented features. ReForm [261] learns the meta-representation for each feature and based on which calculates the relationship between features in the meta-representation space. ReForm then bridges the feature space gap through optimal transport, which could be further used to transform classifiers with different features and classes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 525, + 301, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 525, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 44, + 525, + 301, + 631 + ], + "type": "text", + "content": "A major advantage of neural models is that they are easily fine-tuned in new domains and learn reusable features. For example, as the deep PTM has the ability to extract generalizable features for a tabular task, reusing the knowledge from the PTM can utilize the strategies designed for visual and language domains. 
In detail, we can fix most of the parameters in the PTM and tune the remaining parts which only have limited parameters, for example, the linear probing or parameter-efficient fine-tuning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "content": "Reuse PTM Pre-trained from One Dataset. These methods primarily focus on the difference between the pre-trained and down-streaming datasets. TabRet [72] utilizes masked autoencoding to make the transformer work in downstream tasks. To transfer pre-trained large language models to tabular tasks, ORCA [73] trains an embedder to align the source and target distributions. TabToken [64] focuses on improving the quality of the feature tokens, which are an important component in tabular deep models. TabToken leverages a conditional contrastive loss to improve the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "quality of learned embeddings and demonstrates enhanced transferability of deep learning models for tabular data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 65, + 564, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 65, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 65, + 564, + 274 + ], + "type": "text", + "content": "Pseudo-Feature method [222] utilizes pseudo-feature models individually for each new feature. In detail, given one additional feature in a downstream dataset, it first pretrains a model on the upstream data without that feature. 
Then Pseudo-Feature fine-tunes the pre-trained model on downstream data to predict values in the column absent from the upstream data. Next, the fine-tuned model is used back in the upstream datasets to predict and assign pseudo-values of this feature. After supplementing the upstream dataset with the \"unseen\" feature in the downstream task, PseudoFeature pre-trains and transfers the feature extractor to the downstream task again. This method is computationally expensive in our broader feature space adaptation scenario. Reuse PTM Pre-trained from Multiple Datasets. XTab [230] aims to enhance the transferability of the transformer. They address the challenge of inconsistent column types and quantities among tables by utilizing independent features and federated learning to pre-train the shared component." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 274, + 565, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 274, + 565, + 515 + ], + "spans": [ + { + "bbox": [ + 307, + 274, + 565, + 515 + ], + "type": "text", + "content": "Another thread of method learns shared components such as attribute-agnostic transformation across datasets, which provides a good model initialization for partial parameters given a downstream task. [228] infers latent representations of each attribute and each response from a few labeled instances using an inference network. The attribute and response representations are enabled make predictions based on the task-specific properties of attributes and responses even when attribute and response sizes are different across tasks. DEN [229] uses a three-block architecture: a covariate transformation block followed by a distribution embedding block and then a classification block. They provide theoretical insights to show that this architecture allows the embedding and classification blocks to be fixed after pre-training on a diverse set of tasks. 
Meta-Transformer [231] leverages a frozen encoder to perform multimodal perception without any paired multimodal training data. In Meta-Transformer, the raw input data from various modalities are mapped into a shared space in meta learning [262], allowing a subsequent encoder with frozen parameters to extract high-level semantic features." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 532, + 511, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 532, + 511, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 532, + 511, + 544 + ], + "type": "text", + "content": "6.3 Reusing a Pre-trained Language Model" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "spans": [ + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "text", + "content": "In some cases, the semantic meaning of features is available, making it natural to leverage pre-trained language models for tabular data. Typically, two types of semantic information can be derived from a tabular dataset " + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "text", + "content": ". First, attribute names for each of the " + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "text", + "content": " features, " + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = A_{1},\\ldots ,A_{d}" + }, + { + "bbox": [ + 307, + 547, + 564, + 640 + ], + "type": "text", + "content": ", provide useful context. Additionally, meta-information such as a textual description, denoted as meta_description, can further enhance understanding. 
The learning process is then formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 365, + 647, + 565, + 660 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 647, + 565, + 660 + ], + "spans": [ + { + "bbox": [ + 365, + 647, + 565, + 660 + ], + "type": "interline_equation", + "content": "\\hat {y} _ {i} = f \\left(\\boldsymbol {x} _ {i}, \\mathcal {A} \\mid \\mathcal {D}, \\text {m e t a} _ {\\text {d e s c r i p t}}\\right) \\tag {6}", + "image_path": "6d4e9319095dcfc6e9b34428b411b9cf86fb7fec061631cdcce9c8f786e2f101.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 666, + 566, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 666, + 566, + 700 + ], + "spans": [ + { + "bbox": [ + 307, + 666, + 566, + 700 + ], + "type": "text", + "content": "where the semantic information bridges the gap between feature spaces and facilitates knowledge transfer from pretrained tasks to downstream applications." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 700, + 566, + 748 + ], + "type": "text", + "content": "Although pre-trained language models have demonstrated success in various domains, their application to tabular data remains limited due to the prevalence of numerical values and the scarcity of textual descriptions." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 252, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 44, + 289, + 159 + ], + "blocks": [ + { + "bbox": [ + 50, + 44, + 289, + 159 + ], + "lines": [ + { + "bbox": [ + 50, + 44, + 289, + 159 + ], + "spans": [ + { + "bbox": [ + 50, + 44, + 289, + 159 + ], + "type": "image", + "image_path": "886d5839208dbb126a86d29a0904962b263e4a1c1cdbafa3fd800eb2090af5e9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 167, + 301, + 214 + ], + "lines": [ + { + "bbox": [ + 44, + 167, + 301, + 214 + ], + "spans": [ + { + "bbox": [ + 44, + 167, + 301, + 214 + ], + "type": "text", + "content": "Figure 7: Illustration of transferable tabular methods with a language model. The language model can be applied at various stages, including feature tokenization, feature engineering, and textual serialization." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 223, + 301, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 223, + 301, + 269 + ], + "spans": [ + { + "bbox": [ + 44, + 223, + 301, + 269 + ], + "type": "text", + "content": "Moreover, concerns about data privacy and security may further restrict access to semantic information. Consequently, language models are typically applied to tabular datasets only when textual context is sufficiently available." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 270, + 301, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 270, + 301, + 409 + ], + "spans": [ + { + "bbox": [ + 44, + 270, + 301, + 409 + ], + "type": "text", + "content": "Language Models for Feature Tokenization. When the feature space changes, language-based methods assume that semantic relationships exist between feature descriptions and rely on large-scale language models to capture these connections. For example, the feature \"occupation\" in one task may share semantic similarity with the feature \"organization\" in another, allowing feature-label relationships to be reused across different datasets. By extracting feature embeddings (tokens), tables of varying sizes can be transformed into a standardized set of tokens in a shared space. A pre-trained transformer then encodes transferable knowledge, aiding the fine-tuning process for downstream tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 410, + 301, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 410, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 44, + 410, + 301, + 594 + ], + "type": "text", + "content": "TransTab [77] trains a tokenizer based on the words present in tabular data and incorporates both column descriptions and table cells as raw input to a gated transformer model. The model is pre-trained via self-supervised learning or supervised contrastive loss and is validated on tasks such as transfer learning and feature incremental learning. PTab [232] adopts a similar approach, learning contextual representations from multiple tokenized tabular datasets before fine-tuning for downstream tasks. UniTabE [182] encodes and fuses information from column names, data types, and cell values into a set of tokens, applying an encoder-decoder architecture with Transformer and LSTM components. 
It is pre-trained using Multi-Cell-Masking and contrastive learning, where a sub-vector of an instance is treated as a positive sample while other instances or their subsets are considered negatives." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 595, + 301, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 595, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 44, + 595, + 301, + 734 + ], + "type": "text", + "content": "CM2 [79] introduces a cross-table pre-training framework that integrates attribute names and feature values. CM2 uses transformers to process feature tokens and employs a prompt-based Masked Table Modeling (pMTM) self-supervised objective, where column names act as prompts to assist in predicting masked features. TP-BERTa [78] follows a similar approach but incorporates numerical discretization strategies and magnitude tokenization for feature encoding, fine-tuning smaller pre-trained language models such as RoBERTa [263] for tabular data prediction. Its pre-training objective includes supervised loss and magnitude-aware triplet loss as a regularizer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 59, + 734, + 301, + 746 + ], + "type": "text", + "content": "CARTE [233] utilizes a graph representation of tabular" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 42, + 566, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 157 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 157 + ], + "type": "text", + "content": "data to handle heterogeneous feature spaces, transforming textual information from column names and entries into embeddings. A graph-attentional network is then applied to contextualize entries with column names and neighboring entries. 
CARTE is pre-trained on the YAGO3 knowledge base [264] by constructing graphlets for tabular data and employing contrastive loss, where the original graphlet and one truncated variant are positives, while other graphlets in the batch serve as negatives. The pre-trained CARTE model is subsequently fine-tuned for downstream tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 157, + 567, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 157, + 567, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 157, + 567, + 353 + ], + "type": "text", + "content": "Language Models for Feature Engineering. Discriminative features enhance the effectiveness of subsequent tabular learning models. Binder [234] identifies task input components that are not directly answerable by a model and leverages LLMs to generate auxiliary features, particularly for knowledge grounding tasks. Given that discriminative features are often manually designed, CAAFE [265] explores the use of LLMs to generate auxiliary features based on task and feature semantics. The quality of these features is then evaluated using a general tabular model, TabPFN [89]. FeatLLM [266] enhances feature generation by incorporating example-based prompting, enabling LLMs to create new features based on textual descriptions. TaPTaP [235] is expected to capture a generic tabular data distribution after ongoing pre-training on a large-scale corpus of real-world tabular data, generating high-quality synthetic tables to support various applications on tabular data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 354, + 566, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 566, + 537 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 566, + 537 + ], + "type": "text", + "content": "Language Models for Textual Serialization. 
A direct approach to incorporating pre-trained language models involves converting tabular data into a textual format, allowing LLMs to infer relationships between features and labels based on embedded expert knowledge. This concept has been validated in semantic parsing tasks [267], [268]. LIFT [236] and TabLLM [80] serialize tabular data by integrating feature names into text and combining them with task descriptions. This enables LLMs to treat tabular prediction tasks as text generation problems. LIFT fine-tunes models on the entire training set, while TabLLM employs few-shot learning for fine-tuning. UniPredict [237] constructs prompts using metadata, sample serialization, and task instructions, fine-tuning LLMs with confidence-weighted augmented labels predicted by an external model. The approach is validated on multiple in-distribution datasets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 537, + 566, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 566, + 631 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 566, + 631 + ], + "type": "text", + "content": "Despite their advantages, textual serialization methods face challenges when the number of features increases, as prompts may become too large to fit within the model's context window. The effectiveness of LLMs in tabular data tasks remains constrained by the availability of semantic information and the capabilities of external tabular models. Further exploration of LLM-based methods will be discussed in the general tabular models in Section 7." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 639, + 495, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 639, + 495, + 651 + ], + "spans": [ + { + "bbox": [ + 308, + 639, + 495, + 651 + ], + "type": "text", + "content": "6.4 Reusing a Pre-trained Vision Model" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 654, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 654, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 654, + 566, + 746 + ], + "type": "text", + "content": "Given the success of deep neural networks (DNNs) in visual tasks, it is intuitive to leverage the strong recognition capabilities of pre-trained vision models for tabular data. Additionally, data augmentation strategies commonly used in image processing can be introduced after transforming tabular data into a visual format. Similar ideas have been explored in time series forecasting [269] and irregular time series classification [270]." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 45, + 298, + 144 + ], + "blocks": [ + { + "bbox": [ + 50, + 45, + 298, + 144 + ], + "lines": [ + { + "bbox": [ + 50, + 45, + 298, + 144 + ], + "spans": [ + { + "bbox": [ + 50, + 45, + 298, + 144 + ], + "type": "image", + "image_path": "1cb1d949fe6300f2c81dade78b72b5132ea16551e7587e0e4a4975de40932726.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 153, + 301, + 200 + ], + "lines": [ + { + "bbox": [ + 44, + 153, + 301, + 200 + ], + "spans": [ + { + "bbox": [ + 44, + 153, + 301, + 200 + ], + "type": "text", + "content": "Figure 8: Illustration of transferable tabular methods with a vision model. Tabular data can be transformed into images through dimensionality reduction, table reorganization, and the use of image markers." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 211, + 301, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 211, + 301, + 338 + ], + "spans": [ + { + "bbox": [ + 44, + 211, + 301, + 338 + ], + "type": "text", + "content": "The primary challenge lies in representing tabular instances in an image-compatible format. In natural images, neighboring pixels often share semantic relationships, whereas tabular data lacks inherent spatial structure. Features in a tabular instance are permutation-invariant, meaning that exchanging their order does not alter the instance's meaning. 
Various methods have been proposed to transform tabular data into visual representations, enabling the application of pre-trained vision models fine-tuned for tabular tasks. This subsection highlights different transformation strategies that transfer tabular datasets into images." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 339, + 301, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 339, + 301, + 467 + ], + "spans": [ + { + "bbox": [ + 44, + 339, + 301, + 467 + ], + "type": "text", + "content": "Dimensionality Reduction Transformation. Visualization strategies for tabular data naturally convert tables into images by embedding high-dimensional features into a lower-dimensional space. DeepInsight [238] projects tabular data into a 2D space using t-SNE and constructs images through convex hull analysis, applying translation, rotation, quantization, and normalization. REFINED [239] employs Bayesian Metric Multidimensional Scaling to preserve pairwise distances within the low-dimensional representation, ensuring that structurally similar features remain proximate in the transformed image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 468, + 301, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 468, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 44, + 468, + 301, + 664 + ], + "type": "text", + "content": "Table Reorganization Transformation. A tabular dataset " + }, + { + "bbox": [ + 44, + 468, + 301, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 44, + 468, + 301, + 664 + ], + "type": "text", + "content": " can be treated as a matrix and represented as a single-channel image or kernel. To enable visual PTMs to recognize meaningful spatial relationships, different strategies have been developed for structuring tabular data into images. 
Tabular Convolution (TAC) [240] arranges data samples into zero-mean square matrices (kernels) of odd integer dimensions. These kernels are then convolved with a fixed \"base image,\" and the resulting images are subsequently fed to a CNN for classification. Image Generator for Tabular Data (IGTD) [74] and TabEye [75] share a similar idea, generating an image for each data sample where pixel intensities correspond directly to feature values. These methods prioritize placing similar features in close proximity but struggle with high-dimensional tabular tasks. LM-IGTD [241] extends IGTD by incorporating stochastic feature generation to enhance robustness and generalization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 666, + 301, + 748 + ], + "type": "text", + "content": "Image Marker Transformation. Another approach involves encoding feature values as visual markers within an image. Super-TML [242] assigns feature values to predetermined positions within an image, effectively handling categorical and numerical datasets. Tab2Visual [76] normalizes tabular data and represents each instance as a row of multiple bars, each corresponding to a specific value. Each feature" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 566, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 65 + ], + "type": "text", + "content": "is assigned a unique color to enhance visual differentiation, while bar widths are proportional to feature magnitudes." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 308, + 65, + 564, + 125 + ], + "type": "text", + "content": "By transforming tabular data into images, these methods enable the application of powerful pre-trained vision models to tabular prediction tasks, leveraging established deep learning techniques from the vision domain to enhance tabular model performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 140, + 549, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 140, + 549, + 153 + ], + "spans": [ + { + "bbox": [ + 309, + 140, + 549, + 153 + ], + "type": "text", + "content": "7 FROM TRANSFERABLE TO GENERAL MODEL" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "spans": [ + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "text", + "content": "The general model (also referred to as the tabular foundation model) represents an advancement over the transferable model. It extends the generalization capabilities of a pretrained tabular model to a variety of heterogeneous downstream tabular tasks, regardless of their diverse feature and class spaces, without requiring additional fine-tuning. 
In other words, given a pre-trained model " + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "inline_equation", + "content": "g_{\\Theta}" + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "text", + "content": ", it can be directly applied to a downstream tabular task " + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "text", + "content": " to predict the label of a test instance " + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "inline_equation", + "content": "x^{*}" + }, + { + "bbox": [ + 307, + 156, + 566, + 261 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 397, + 268, + 565, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 268, + 565, + 281 + ], + "spans": [ + { + "bbox": [ + 397, + 268, + 565, + 281 + ], + "type": "interline_equation", + "content": "\\hat {y} ^ {*} = g _ {\\Theta} \\left(\\boldsymbol {x} ^ {*} \\mid \\mathcal {D}\\right). \\tag {7}", + "image_path": "4905a666543e6173e42b6e66fa5fa25130cc3fda26fc641421ab2bcfc9714cc7.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "text", + "content": "Thus, the general model shares similarities with the transferable tabular model, but with a greater emphasis on the \"zero-shot\" ability, aims to construct highly adaptive architectures capable of handling a wide array of heterogeneous datasets simultaneously. Importantly, it does not require an Adapt function, which further reduces the computational cost of hyper-parameter tuning. 
The goal of the general tabular model is to achieve better generalization on downstream tabular datasets " + }, + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "text", + "content": " when compared to alternative strategies, such as training a tabular model directly on " + }, + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 307, + 287, + 566, + 415 + ], + "type": "text", + "content": " or adapting a transferable model." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 417, + 566, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 417, + 566, + 579 + ], + "spans": [ + { + "bbox": [ + 309, + 417, + 566, + 579 + ], + "type": "text", + "content": "Remark 6. Distinguishing between an advanced transferable tabular model, pre-trained on a wide range of heterogeneous tabular tasks, and the general tabular model can be challenging. Some transferable tabular models, based on auxiliary feature semantics, are able to predict labels for downstream test instances directly [80]. However, their prediction ability is constrained and typically applicable only in specific areas after fine-tuning [78], [233]. The general tabular model, on the other hand, is designed to handle a wider range of heterogeneous tabular tasks, sharing similar pre-training challenges with transferable models but without utilizing additional semantics. Fine-tuning a pre-trained general model is also an option for further performance improvements [93], [96]." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 584, + 567, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 584, + 567, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 584, + 567, + 748 + ], + "type": "text", + "content": "Pre-training has revolutionized domains such as vision and language [271], [84], but its adoption in tabular data remains limited due to the inherent heterogeneity of tabular datasets. Tabular datasets can vary significantly in both dimensionality (i.e., the number of columns) and the semantic meaning of each dimension, even within the same application. For example, different healthcare datasets may capture varying levels of detail and aspects of patient information. Even within the same feature entry (e.g., the " + }, + { + "bbox": [ + 307, + 584, + 567, + 748 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 307, + 584, + 567, + 748 + ], + "type": "text", + "content": "-th column), the meaning can vary (e.g., \"age\" vs. \"height\"). This contrasts with vision and text data (within the same language), where different data sources typically share the same \"vocabulary\" (e.g., pixels, patches, or sub-words) and similar relationships between vocabulary \"elements\" (e.g., neighboring pixels" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 43, + 171, + 128 + ], + "blocks": [ + { + "bbox": [ + 47, + 43, + 171, + 128 + ], + "lines": [ + { + "bbox": [ + 47, + 43, + 171, + 128 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 171, + 128 + ], + "type": "image", + "image_path": "2810f4f79d0742a7f04b257adc7a6d771ff30010de75e71b2e070205dd3e7735.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 135, + 301, + 196 + ], + "lines": [ + { + "bbox": [ + 44, + 135, + 301, + 196 + ], + "spans": [ + { + "bbox": [ + 44, + 135, + 301, + 196 + ], + "type": "text", + "content": "Figure 9: Illustration of general methods. These methods handle inherent heterogeneity by improving the model's adaptability or homogenizing the diverse tabular formats. Once pre-trained, they can be directly applied to downstream tasks without fine-tuning." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 44, + 301, + 128 + ], + "blocks": [ + { + "bbox": [ + 173, + 44, + 301, + 128 + ], + "lines": [ + { + "bbox": [ + 173, + 44, + 301, + 128 + ], + "spans": [ + { + "bbox": [ + 173, + 44, + 301, + 128 + ], + "type": "image", + "image_path": "0c3b348377cdacba43d6c9c27c9890a6b7801bef55e3b56fb722125736d11ff9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 200, + 301, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 200, + 301, + 247 + ], + "spans": [ + { + "bbox": [ + 44, + 200, + 301, + 247 + ], + "type": "text", + "content": "often share colors). The lack of shared vocabulary and relationships in tabular data makes it challenging to jointly train a model across multiple datasets, let alone apply a pre-trained model directly to new downstream tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 247, + 301, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 247, + 301, + 363 + ], + "spans": [ + { + "bbox": [ + 44, + 247, + 301, + 363 + ], + "type": "text", + "content": "There are two main strategies to address the inherent heterogeneity in tabular datasets: improving the model's adaptability or homogenizing the diverse tabular formats. We categorize general tabular models into three parts based on their strategies for achieving generalizability. The first focuses on raw-feature-based approaches, among which TabPFN variants represent a rapidly evolving branch and are thus discussed separately. The third category encompasses semantic-based methods that leverage attribute and task semantics to unify heterogeneous tasks." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 378, + 228, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 378, + 228, + 389 + ], + "spans": [ + { + "bbox": [ + 45, + 378, + 228, + 389 + ], + "type": "text", + "content": "7.1 Raw-Feature-based General Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 392, + 300, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 392, + 300, + 508 + ], + "spans": [ + { + "bbox": [ + 44, + 392, + 300, + 508 + ], + "type": "text", + "content": "To adapt a general tabular model to heterogeneous tabular datasets during the pre-training and fine-tuning stages, two main strategies can be used from the data-centric and model-centric perspectives. From the data-centric perspective, the general model may standardize tabular datasets into a homogeneous form. For instance, TabPTM [86] transforms all datasets into a uniform format using meta-representation to enable pre-training. The pre-trained model can then be applied directly to a downstream dataset or fine-tuned without introducing additional parameters." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 509, + 301, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 509, + 301, + 636 + ], + "spans": [ + { + "bbox": [ + 44, + 509, + 301, + 636 + ], + "type": "text", + "content": "Alternatively, from the model-centric perspective, the general model may improve adaptability by tailoring it to specific tabular tasks. HyperFast [87] adopts the concept of a Hyper Network [272] in meta-learning [273], where a mapping from the tabular dataset to the weights of a classifier is learned. This mapping can then be used to predict labels for unseen instances from the task. To address datasets with varying dimensions, HyperFast projects datasets into a fixed size using random projections. 
To overcome the slow weight generation speed, MotherNet accelerates HyperFast by modifying its architecture with Transformer-like modules [88]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 651, + 145, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 651, + 145, + 661 + ], + "spans": [ + { + "bbox": [ + 45, + 651, + 145, + 661 + ], + "type": "text", + "content": "7.2 TabPFN Variants" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "text", + "content": "The TabPFN family of models [89], [91] leverages the incontext learning capabilities of transformers, directly predicting labels by adapting test instances according to the context of training examples. In the first version of TabPFN, an instance " + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "text", + "content": " is padded to a fixed dimension (e.g., 100), and the features are projected to a higher dimension (e.g., " + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "text", + "content": ") for further processing. The label " + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 44, + 665, + 301, + 748 + ], + "type": "text", + "content": " is processed similarly and" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": "added to the instance embeddings. 
The embeddings of all " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "N + 1" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": " instances, including training and test instances, are formulated into a set of " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "N + 1" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": " tokens with " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": " dimensions. These tokens are processed through several layers of a Transformer, and the output token corresponding to the test instance is further predicted using a 10-way classifier. TabPFN is pretrained over synthetically generated datasets with structured causal models (SCM) [274] and Bayesian Neural Networks (BNNs) [275], [276], enabling the strong in-context learning ability, with the best checkpoint selected based on some real-world datasets. Due to the high complexity of transformers, TabPFN is limited to small-scale tasks, with suggested sizes of " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "N < 1000" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "d < 100" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "inline_equation", + "content": "C < 10" + }, + { + "bbox": [ + 307, + 42, + 566, + 191 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "spans": [ + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": "TabPFN v2 introduces a specialized feature tokenizer to better handle heterogeneity. Specifically, each cell in the table is projected to a " + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": "-dimensional vector using a shared mapping, and random position encoding vectors are added to differentiate features [277]. This results in a tensor of size " + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "inline_equation", + "content": "(N + 1) \\times (d + 1) \\times k" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": " when there is a single test instance. The label of each instance is processed similarly, and the mapped " + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": "-dimensional token is concatenated with the instance tokens. A dummy label (e.g., the average of all labels) is used for the test instance since its label is unknown. A two-way attention mechanism is used, with each feature attending to the other features in its row and then attending to the same feature across its column [278]. The output token corresponding to the label of the test instance is further mapped to a 10-class classifier or regressor. 
Several improvements have been made in TabPFN v2, including increased context size (" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "inline_equation", + "content": "N < 10000" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "inline_equation", + "content": "d < 500" + }, + { + "bbox": [ + 307, + 192, + 567, + 445 + ], + "type": "text", + "content": "), automatic feature engineering, and post-hoc ensemble methods. [279] analyzes TabPFN from a bias-variance perspective, shedding light on its generalization capabilities. Various applications have also been explored, including tabular data generation [280], anomaly detection [281], data augmentation [282], and time series forecasting [283]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 446, + 566, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 446, + 566, + 469 + ], + "spans": [ + { + "bbox": [ + 308, + 446, + 566, + 469 + ], + "type": "text", + "content": "The improvements of TabPFN (especially TabPFN v1) stem from several aspects." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 469, + 566, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 469, + 566, + 701 + ], + "spans": [ + { + "bbox": [ + 307, + 469, + 566, + 701 + ], + "type": "text", + "content": "Pre-training Improvements. TabForestPFN [284] extends TabPFN by pre-training In-Context Learning (ICL)-transformers on a new forest dataset generator that creates unrealistic datasets with complex decision boundaries. TabDPT [179] pre-trains the architecture on real-world datasets using self-supervised learning and retrieval objectives, making it suitable for both classification and regression tasks. 
APT [285] is pre-trained utilizing adversarial synthetic data generated by adaptive agents, which systematically modify the underlying data-generating distribution and deliberately challenge the model with diverse synthetic datasets to enhance its robustness and generalization capabilities. TabICL [286] integrates tree-based SCMs using XGBoost [130] to model complex interactions and employs curriculum learning by progressively increasing synthetic dataset sizes. Scalable Improvements. The efficiency of TabPFN is highly sensitive to context size, prompting strategies to enhance scalability and performance [39]. These include compressing training data into a compact learned representation using sketching [287] or prompt tuning techniques [288], [289]," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 708, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 708, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 708, + 566, + 746 + ], + "type": "text", + "content": "1. Some variants of TabPFN are not considered general tabular models, especially the latter parts, as they require additional fine-tuning steps. We place them in this subsection due to their strong relationship with TabPFN." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 100 + ], + "type": "text", + "content": "employing adaptive data selection methods to identify the most pertinent training examples for each test instance [290], [90], [179], [291], and replacing traditional quadratic attention with computationally efficient linear attention mechanisms [292] and state-space models (SSMs) [293]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 101, + 301, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 101, + 301, + 228 + ], + "spans": [ + { + "bbox": [ + 44, + 101, + 301, + 228 + ], + "type": "text", + "content": "Adaptation Improvements. Some approaches improve TabPFN's performance on downstream tasks by adapting the context [90] or fine-tuning specific parts of the model [96], [284], [290], [289]. TabICL [286] employs a column-then-row attention mechanism to construct fixed-dimensional embeddings of rows, which are subsequently processed by a transformer like TabPFN v1 to facilitate efficient in-context learning. EquiTabPFN [294] introduces self-attention across target components, ensuring that the arbitrary ordering of target dimensions does not influence model predictions, enhancing the performance of TabPFN v1 to some extent." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 244, + 218, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 244, + 218, + 255 + ], + "spans": [ + { + "bbox": [ + 45, + 244, + 218, + 255 + ], + "type": "text", + "content": "7.3 Semantics-based General Models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 260, + 302, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 260, + 302, + 526 + ], + "spans": [ + { + "bbox": [ + 44, + 260, + 302, + 526 + ], + "type": "text", + "content": "By leveraging the semantic structure of tabular data, such as column names, heterogeneous tasks can be projected into a shared language space. This allows a single language model, pre-trained on diverse tabular datasets, to handle unseen tasks in a unified manner. TabuLa-8B [92] fine-tunes a Llama 3-8B LLM for tabular data prediction (classification and binned regression) using a novel packing and attention scheme for tabular prediction. GTL [93] transforms tabular datasets into an instruction-oriented language format, facilitating the continued pre-training of LLMs on instruction-oriented tabular data, which demonstrates strong performance in few-shot scenarios. GTL-S [295] unlocks the potential of GTL from a scaling perspective, revealing that scaling datasets and prediction tasks enhance generalization. [94] extends GTL by incorporating retrieval-augmented LLMs for tabular data, combined with retrieval-guided instruction-tuning for LLMs. MediTab [243] uses a data engine that leverages LLMs to consolidate tabular samples to overcome the barrier across tables with distinct schema. MediTab aligns out-domain data with the target task using a \"learn, annotate, and refinement\" pipeline, enabling the pre-trained model to infer for arbitrary tabular input in the domain without fine-tuning." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 544, + 224, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 544, + 224, + 556 + ], + "spans": [ + { + "bbox": [ + 45, + 544, + 224, + 556 + ], + "type": "text", + "content": "8 TABULAR ENSEMBLE METHODS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 561, + 301, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 561, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 44, + 561, + 301, + 643 + ], + "type": "text", + "content": "Ensemble learning is a natural way to improve the generalization ability of multiple base learners by leveraging their diversity. Classical methods such as Random Forest [127] and AdaBoost [126], [296] employ bagging and boosting, respectively, by ensembling multiple decision trees. These methods have proven effective for tabular data, as they reduce bias/variance and improve robustness [297]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "content": "In deep tabular learning, ensemble methods can be categorized into two primary approaches: joint-training ensembles, where multiple sub-networks are aggregated within a single training pipeline, and post-hoc ensembles, where the predictions from multiple pre-trained deep tabular models are fused. One major challenge in ensembling deep tabular methods is computational efficiency, as training multiple deep models or sub-models can be computationally expensive and time-consuming." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 42, + 451, + 54 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 42, + 451, + 54 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 451, + 54 + ], + "type": "text", + "content": "8.1 Joint-Training Ensembles" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 59, + 566, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 59, + 566, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 59, + 566, + 152 + ], + "type": "text", + "content": "Joint-training ensemble methods integrate diverse model architectures within a single training process to improve predictive performance while maintaining efficiency. These architectures often combine different types of models, such as linear and non-linear models [28] or tree-based and deep neural network-based approaches [63]. Tree-mimic methods leverage this concept by mixing predictions from multiple tree nodes to enhance robustness [60], [59], [193]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 152, + 567, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 567, + 302 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 567, + 302 + ], + "type": "text", + "content": "To improve efficiency while maintaining predictive power, various techniques have been explored. Some approaches employ parameter-efficient ensembles, such as TabM [176], which uses MLPs as base learners and incorporates BatchEnsemble [298] to generate multiple diverse base learners efficiently. This prevents a large increase in the number of learnable parameters while maintaining model diversity. Similarly, BETA leverages pre-trained TabPFN by generating multiple base learners through additional parameter tuning [96]. 
Specifically, BETA learns multiple feature projections, feeding the projected training sets into TabPFN and aggregating the results while applying BatchEnsemble to reduce the number of additional learnable parameters." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 303, + 567, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 303, + 567, + 397 + ], + "spans": [ + { + "bbox": [ + 308, + 303, + 567, + 397 + ], + "type": "text", + "content": "Some hybrid approaches, such as LLM-Boost and PFN-Boost, have been developed to integrate large language models and TabPFN with gradient-boosted decision trees [299]. In these approaches, LLMs and PFN serve as the initial base learners, and additional base learners are sequentially trained in a boosting manner. This approach leverages the strong prior knowledge from LLMs and TabPFN while maintaining the scalability of gradient-boosted decision trees." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 416, + 430, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 416, + 430, + 427 + ], + "spans": [ + { + "bbox": [ + 309, + 416, + 430, + 427 + ], + "type": "text", + "content": "8.2 Post-Hoc Ensembles" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 434, + 567, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 567, + 571 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 567, + 571 + ], + "type": "text", + "content": "Post-hoc ensemble (PHE) methods involve combining multiple trained models to improve robustness and accuracy. Bagging-based ensembles are one of the most direct post-hoc strategies, where usually multiple models trained with different random seeds are aggregated [33], [69]. Although this approach improves model robustness, it incurs high computational overhead. 
Some recent studies have demonstrated that LLM-based methods exhibit diverse prediction behaviors compared to deep tabular models that do not utilize attribute names [94]. This difference in prediction styles enhances their complementarity, making them ideal candidates for ensemble methods." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 573, + 567, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 573, + 567, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 573, + 567, + 746 + ], + "type": "text", + "content": "Instead of explicitly training multiple models, perturbation-based approaches create diverse predictions from the same pre-trained model. One such method applies feature permutation with TabPFN, leveraging the fact that TabPFN is not fully feature permutation-invariant [89]. A perturbation-based ensemble can be formed by randomly permuting the feature order in both the training and test sets and making predictions multiple times, generating multiple diverse predictors without additional training costs. TabPFN v2 introduces additional perturbations to enhance diversity among several key factors, including variations in feature encoding, feature quantization, categorical feature shuffling, SVD-based feature compression, outlier removal, and power transformations such as the Yeo-Johnson transformation [91]. These randomly selected transformations create diverse" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "type": "text", + "content": "prediction patterns, enabling effective ensemble learning without requiring multiple separately trained models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 66, + 301, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 66, + 301, + 240 + ], + "spans": [ + { + "bbox": [ + 44, + 66, + 301, + 240 + ], + "type": "text", + "content": "Another post-hoc ensemble strategy employed in TabPFN v2 is the use of Portfolio-Based Ensemble, where a fixed set of TabPFN configurations is used [91]. A greedy ensemble selection technique is then applied to learn optimal weights for aggregating the predictions of different configurations [300]. By combining multiple perturbed models, this method improves generalization without excessive training costs. Some methods apply ensemble techniques to TabPFN v1 to handle large datasets. For instance, TabPFN-Bagging [96], [301] divides large datasets into multiple context groups, with the final results averaged to mitigate variance. BoostPFN [301] treats TabPFN v1 as weak learners, where each weak learner uses a subset of the training data as context. This approach allows BoostPFN to outperform standard Prior Fitted Networks (PFNs) on large datasets." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 262, + 129, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 262, + 129, + 275 + ], + "spans": [ + { + "bbox": [ + 45, + 262, + 129, + 275 + ], + "type": "text", + "content": "9 EXTENSIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 282, + 299, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 282, + 299, + 305 + ], + "spans": [ + { + "bbox": [ + 44, + 282, + 299, + 305 + ], + "type": "text", + "content": "In this section, we briefly introduce some extensions on deep tabular methods across different complex tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 306, + 301, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 306, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 44, + 306, + 301, + 468 + ], + "type": "text", + "content": "Clustering. Traditional clustering approaches often leverage enhanced distance metrics, such as the Gower distance [302], which is specifically designed for mixed data types, and interpretable prototypes, such as K-medoids. Recent advances in tabular data clustering have sought to integrate interpretability constraints with deep representation learning. For example, IDC [97] introduces a deep learning framework for general tabular data that predicts interpretable cluster assignments at both the instance and cluster levels. To address overlapping clusters, TableDC [98] integrates the Mahalanobis distance, which accounts for variance and correlation within the data. This method provides a similarity measure suitable for tables, rows, or columns in high-dimensional latent spaces." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 468, + 301, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 468, + 301, + 642 + ], + "spans": [ + { + "bbox": [ + 44, + 468, + 301, + 642 + ], + "type": "text", + "content": "Anomaly Detection. 
Anomaly detection in tabular data is crucial for identifying subtle irregularities in structured datasets, such as fraudulent transactions or equipment failures. While classical techniques like Isolation Forest [303] and Local Outlier Factor [304] remain foundational, recent developments have incorporated various methods to capture contextual relationships in high-dimensional data. For instance, [305] introduces a method that learns mappings that maximize mutual information between each sample and the part that is masked out, capturing the structural nuances of samples from a single training class. ADBench [99] provides a comprehensive tabular anomaly detection benchmark with 30 algorithms and 57 benchmark datasets. Additionally, large language models (LLMs) have also been employed for anomaly detection in tabular data [306]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "type": "text", + "content": "Tabular Generation. Tabular data generation has become an essential tool for synthetic data creation, privacy preservation, and addressing data scarcity. Traditional methods, such as Bayesian networks or GANs, focus on mimicking marginal distributions, while recent advancements emphasize preserving complex feature dependencies and semantic consistency. For instance, tabular diffusion models [307] iteratively refine synthetic data to capture subtle correlations in high-dimensional datasets, outperforming GANs in terms of data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 42, + 564, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 134 + ], + "type": "text", + "content": "fidelity. 
[308] introduces high-order structural causal information as a natural prior knowledge and offers a benchmark framework for evaluating tabular synthesis models. Despite these advances, challenges remain in balancing realism with privacy, such as avoiding identity leakage in sensitive datasets, and scaling to heterogeneous data types. Hybrid neuro-symbolic models [309] bridge this gap to provide trustworthy synthetic data for downstream tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 135, + 564, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 135, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 135, + 564, + 308 + ], + "type": "text", + "content": "Interpretability. Traditional gradient-boosted decision trees (GBDTs) inherently provide interpretability through feature importance scores and decision path visualization. Frameworks such as XGBoost [130] and LightGBM [131] quantify feature importance using metrics like split frequency and information gain. SHAP values [310] enable instance-level explanations by decomposing model predictions into feature contributions. The additive nature of GBDTs allows for partial dependence plots [311] to visualize feature effects while controlling for interactions. NeC4.5 [253], a novel decision tree algorithm that integrates the comprehensibility of decision trees with the generalization ability of neural network ensembles. By training a neural network ensemble to generate a new training set, NeC4.5 enhances decision tree performance while maintaining interpretability." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 308, + 564, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 564, + 538 + ], + "type": "text", + "content": "Recent deep models specifically designed for tabular data have introduced novel interpretability mechanisms. 
For example, NAMs [255] combine some of the expressivity of DNNs with the inherent intelligibility of generalized additive models. They learn a linear combination of neural networks that each attend to a single input feature, which are trained jointly and can learn arbitrarily complex relationships between their input feature and the output. TabNet [105] uses sequential attention with learnable feature masks, where each decision step explicitly selects a subset of features via sparse masking. The aggregated feature usage across steps provides global interpretability comparable to GBDT's feature importance. Subsequent variants, such as TabTransformer [63], enhance interpretability by visualizing cross-feature attention patterns. FT-Transformer [33] combines feature tokenization with explainable attention, while NODE [60], NODE-GAM [61] and DOFEN [312] generalize ensembles of oblivious decision trees, benefiting from both end-to-end gradient-based optimization and multi-layer hierarchical representation learning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 539, + 564, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 539, + 564, + 711 + ], + "spans": [ + { + "bbox": [ + 308, + 539, + 564, + 711 + ], + "type": "text", + "content": "Open-Environment Tabular Machine Learning. Research on distribution shifts in tabular data starts with domain-to-domain shifts [110], which are commonly categorized based on the availability of target domain data. When target data is available, transfer learning techniques such as unsupervised domain adaptation [313] and test-time adaptation [314] are widely used. These methods adapt model parameters using test-time inputs but rely on access to target distributions, which may not always be feasible. 
In contrast, when target data is unavailable, a more practical but challenging scenario, methods aiming to enhance robustness and generalization, using approaches such as domain generalization [315], domain robustness [316], [317], label robustness [318] or ensemble strategies [95]. TableShift [110] provides a detailed analysis of this scenario." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 712, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 565, + 746 + ], + "type": "text", + "content": "Beyond domain-to-domain shifts, temporal shifts are more general and complex. TabReD [109] emphasizes the inherent temporality of real-world tabular data, advocating" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 216 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 216 + ], + "type": "text", + "content": "for temporal splits for training and testing. [319] further propose a refined training protocol focusing on temporal evaluation, significantly improving generalization across models. 
To address temporal shifts, it's critical to incorporate temporal information [319]. Drift-Resilient TabPFN [174] models temporal shifts with a secondary SCM, which specifies changes in the primary model parameters. [319] introduce a plug-and-play temporal embedding that effectively captures trend and periodicity patterns, providing an adaptive mechanism to mitigate the impact of temporal shifts. Under temporal shift conditions, most methods experience performance degradation, while TabM [95] exhibits relative robustness [109]. However, [319] demonstrate that with the refined training protocol and temporal embedding, methods such as ModernNCA [35] can regain competitiveness." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 223, + 301, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 223, + 301, + 408 + ], + "spans": [ + { + "bbox": [ + 44, + 223, + 301, + 408 + ], + "type": "text", + "content": "Multi-modal Learning with Tabular Data. Text, such as feature names, can be effectively utilized to enhance tabular data learning, as discussed in Section 6. Here, we focus on interactions with the image modality, e.g., in healthcare, where medical images require specialized equipment and expert knowledge, often in tabular form, for accurate diagnosis [320]. To tackle challenges like large medical datasets and high annotation costs, MMCL [106] uses a contrastive self-supervised learning framework that integrates images and tabular data. CHARMS [107] transfers expert knowledge from tabular data to images, improving image predictions even without tabular data during inference, thus reducing reliance on costly expert annotations. TIP [321] proposes a self-supervised learning strategy with a tabular encoder for incomplete, heterogeneous data and a multimodal interaction module for inter-modality representation learning." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 415, + 301, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 415, + 301, + 589 + ], + "spans": [ + { + "bbox": [ + 44, + 415, + 301, + 589 + ], + "type": "text", + "content": "Tabular Understanding. Tabular understanding involves comprehending the information contained within a table and can be broken down into several tasks. For example, Table Detection (TD) [322], [323] refers to identifying the region of the image that contains the table while Table Structure Recognition (TSR) [324], [325] involves the identification of the rows and columns to identify individual table cells, which aims to recognize the cellular structures of tables from table images by extracting the coordinates of cell boxes and row/column spanning information. Table Question Answering (TQA) [326], [327], [112] refers to providing precise answers from tables to answer a user's question. Traditional methods, whether OCR-based [328], [329], [330] or OCR-free [331], [332], [333], [334], [335], have made significant strides in TSR and TD, which are relatively simpler tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "content": "More complex tasks, such as TQA, have also been the focus of considerable effort. For example, Donut [332] proposes a novel task and a synthetic document image generator to pre-train the model, reducing reliance on large-scale real document images. Monkey and TextMonkey [336], [337] utilize shifted window attention and use similarity measures to filter out redundant tokens. 
mPLUG-DocOwl [338] adapts mPLUG-Owl for OCR-free document understanding, while TabPedia [335] constructs low- and high-resolution vision encoders with a concept synergy mechanism for visual table understanding. [339] focuses on exploring various table representations and directly prompting LLMs to improve performance. Please refer to [112], [113] for more details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 41, + 406, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 41, + 406, + 53 + ], + "spans": [ + { + "bbox": [ + 310, + 41, + 406, + 53 + ], + "type": "text", + "content": "10 DISCUSSIONS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 62, + 566, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 62, + 566, + 108 + ], + "spans": [ + { + "bbox": [ + 308, + 62, + 566, + 108 + ], + "type": "text", + "content": "In this section, we discuss several possible future directions for tabular machine learning, particularly in light of the significant potential demonstrated by tabular general/foundation models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 109, + 566, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 109, + 566, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 109, + 566, + 270 + ], + "type": "text", + "content": "The Ability to Handle Dynamic and Open Environments. Tabular models, particularly foundation models, will increasingly need to operate in dynamic, real-world environments where data evolves over time [340]. One of the key challenges is dealing with imbalanced datasets [155], where certain classes may be underrepresented, and the distribution of data may shift over time [110]. As a result, models need to adapt to these changes and continue providing accurate predictions. Additionally, the emergence of new classes in the data may require the model to evolve and update its predictions in real-time [341]. 
This calls for methods that ensure tabular foundation models can accommodate evolving data, handling both new classes and changing distributions effectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 271, + 566, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 271, + 566, + 502 + ], + "spans": [ + { + "bbox": [ + 308, + 271, + 566, + 502 + ], + "type": "text", + "content": "The Coverage and Scope of Tabular Foundation Models. Current tabular foundation models have demonstrated strong performance on various unseen classification and regression tasks. However, several important questions remain about their capabilities. For instance, in addition to in-context learning [246], are there other prediction strategies that could be employed to further enhance the versatility and performance of tabular foundation models? Beyond classification and regression, can these models be extended to handle related tasks such as clustering, imputation, outlier detection, or even table-based question answering (QA)? Expanding the task scope could increase the model's utility in a wide range of applications. Furthermore, it is worth investigating whether there is a scaling law [342] for tabular foundation models. Currently, tabular checkpoints are relatively small compared to foundation models in other modalities, such as language or vision. Understanding the implications of scaling these models—particularly the trade-offs between model size and performance—will be crucial for their future development." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 503, + 566, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 503, + 566, + 642 + ], + "spans": [ + { + "bbox": [ + 308, + 503, + 566, + 642 + ], + "type": "text", + "content": "Will Foundation Models Always Help? While foundation models have demonstrated impressive generalization abilities, there are inherent trade-offs. 
Similar to ensemble learning, a single foundation model may provide an \"average\" predictive ability across tasks, potentially losing specialized expertise for specific tasks. To address this, a promising approach could be the development of a \"tabular model zoo\" [343], [344]. In this paradigm, different pre-trained models, potentially including models from other domains, could be combined for a specific tabular task. Given a new task, suitable pre-trained models could be selected, adapted if necessary, and integrated for improved performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 643, + 567, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 643, + 567, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 643, + 567, + 746 + ], + "type": "text", + "content": "Model Efficiency. In many real-world applications, tabular datasets are large and high-dimensional, posing significant challenges for both training and inference [345], [44]. One area of concern is how to handle extreme cases, such as when the data is exceptionally large or sparse. Foundation models should be able to scale effectively in these scenarios without sacrificing performance. Another issue is inference speed. In large-scale problems, timely predictions are essential, especially when deployed in real-time environments [292]. Opti-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 253, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "type": "text", + "content": "mizing the inference process is therefore critical to ensure that predictions can be made quickly on large, complex datasets. Lastly, the computational resources required for training and deploying foundation models can be substantial [346]. Optimizing resource usage through methods such as model pruning, quantization, and efficient training algorithms will be important to ensure that these models remain practical and accessible for a wide range of applications." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 135, + 301, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 135, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 44, + 135, + 301, + 285 + ], + "type": "text", + "content": "Bridging the Gap Between Tabular Data and Other Modalities. Tabular data often coexists with other data modalities, such as images and text. One of the exciting challenges in the field is how to effectively integrate tabular data with foundation models from other domains [347]. Combining the strengths of tabular models with those of vision or language models could result in more powerful and versatile models capable of handling multimodal data. 
Exploring how to seamlessly integrate these modalities—whether through joint embeddings, cross-modal attention mechanisms, or other techniques—could unlock significant advances in tasks that require both structured tabular data and unstructured data sources like images or text." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 298, + 138, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 298, + 138, + 309 + ], + "spans": [ + { + "bbox": [ + 45, + 298, + 138, + 309 + ], + "type": "text", + "content": "11 CONCLUSION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 313, + 302, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 313, + 302, + 499 + ], + "spans": [ + { + "bbox": [ + 44, + 313, + 302, + 499 + ], + "type": "text", + "content": "Tabular data remains a cornerstone of real-world machine learning applications, and the advancement of deep learning has opened new possibilities for effective representation learning in this domain. In this survey, we present a comprehensive overview of deep tabular representation learning, covering its background, challenges, evaluation benchmarks, and the discussion between tree-based models and DNNs. We systematically categorize existing methods into three categories—specialized, transferable, and general models—based on their generalization capabilities. In addition, we discuss ensemble techniques, extensions, and some promising future directions, such as open-environment and multimodal tabular learning. We hope this survey serves as a valuable reference for understanding the current state of the field and inspires further progress in developing more robust and generalizable tabular learning methods." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 512, + 115, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 512, + 115, + 523 + ], + "spans": [ + { + "bbox": [ + 45, + 512, + 115, + 523 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 528, + 301, + 744 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 45, + 528, + 301, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 528, + 301, + 555 + ], + "spans": [ + { + "bbox": [ + 45, + 528, + 301, + 555 + ], + "type": "text", + "content": "[1] B. Kovalerchuk and E. Vityaev, Data mining in finance: advances in relational and hybrid methods. Springer Science & Business Media, 2005. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 555, + 301, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 301, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 301, + 592 + ], + "type": "text", + "content": "[2] S. L. Hyland, M. Faltys, M. Hüser, X. Lyu, T. Gumbsch, C. Esteban, C. Bock, M. Horn, M. Moor, B. Rieck et al., \"Early prediction of circulatory failure in the intensive care unit using machine learning,\" Nature medicine, vol. 26, no. 3, pp. 364-373, 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 592, + 301, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 592, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 46, + 592, + 301, + 620 + ], + "type": "text", + "content": "[3] C. Romero and S. Ventura, \"Educational data mining: a review of the state of the art,\" IEEE Transactions on Systems, Man, and Cybernetics, vol. 40, no. 6, pp. 601-618, 2010. 
1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 620, + 301, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 620, + 301, + 647 + ], + "spans": [ + { + "bbox": [ + 46, + 620, + 301, + 647 + ], + "type": "text", + "content": "[4] X. Amatriain, A. Jaimes, N. Oliver, and J. M. Pujol, \"Data mining methods for recommender systems,\" in Recommender systems handbook. Springer, 2010, pp. 39-71. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 647, + 301, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 647, + 301, + 682 + ], + "spans": [ + { + "bbox": [ + 46, + 647, + 301, + 682 + ], + "type": "text", + "content": "[5] R. Tibshirani, T. Hastie, B. Narasimhan, and G. Chu, \"Diagnosis of multiple cancer types by shrunken centroids of gene expression,\" Proceedings of the National Academy of Sciences, vol. 99, no. 10, pp. 6567-6572, 2002. 1, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 682, + 301, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 682, + 301, + 708 + ], + "spans": [ + { + "bbox": [ + 46, + 682, + 301, + 708 + ], + "type": "text", + "content": "[6] O. Ivanciuc et al., \"Applications of support vector machines in chemistry,\" Reviews in computational chemistry, vol. 23, p. 291, 2007. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 708, + 301, + 744 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 708, + 301, + 744 + ], + "spans": [ + { + "bbox": [ + 46, + 708, + 301, + 744 + ], + "type": "text", + "content": "[7] N. K. Ahmed, A. F. Atiya, N. E. Gayar, and H. El-Shishiny, \"An empirical comparison of machine learning models for time series forecasting,\" Econometric reviews, vol. 29, no. 5-6, pp. 594-621, 2010." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 43, + 566, + 746 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 310, + 43, + 566, + 70 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 43, + 566, + 70 + ], + "spans": [ + { + "bbox": [ + 310, + 43, + 566, + 70 + ], + "type": "text", + "content": "[8] M. R. Allen and D. A. Stainforth, \"Towards objective probabilistic climate forecasting,\" Nature, vol. 419, no. 6903, pp. 228-228, 2002. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 72, + 566, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 72, + 566, + 107 + ], + "spans": [ + { + "bbox": [ + 311, + 72, + 566, + 107 + ], + "type": "text", + "content": "[9] V. Borisov, T. Leemann, K. Seßler, J. Haug, M. Pawelczyk, and G. Kasneci, \"Deep neural networks and tabular data: A survey,\" IEEE Transactions Neural Networks and Learning Systems, vol. 35, no. 6, pp. 7499-7519, 2024. 1, 4, 7, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 311, + 108, + 555, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 108, + 555, + 118 + ], + "spans": [ + { + "bbox": [ + 311, + 108, + 555, + 118 + ], + "type": "text", + "content": "[10] C. C. Aggarwal, Data Mining - The Textbook. Springer, 2015. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 311, + 118, + 564, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 118, + 564, + 135 + ], + "spans": [ + { + "bbox": [ + 311, + 118, + 564, + 135 + ], + "type": "text", + "content": "[11] Z. Ji, Z. C. Lipton, and C. Elkan, \"Differential privacy and machine learning: a survey and review,\" CoRR, vol. abs/1412.7584, 2014. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 311, + 135, + 564, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 135, + 564, + 171 + ], + "spans": [ + { + "bbox": [ + 311, + 135, + 564, + 171 + ], + "type": "text", + "content": "[12] M. F. Delgado, E. Cernadas, S. Barro, and D. G. Amorim, \"Do we need hundreds of classifiers to solve real world classification problems?\" Journal of Machine Learning Research, vol. 15, no. 1, pp. 3133-3181, 2014. 1, 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 171, + 566, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 171, + 566, + 189 + ], + "spans": [ + { + "bbox": [ + 311, + 171, + 566, + 189 + ], + "type": "text", + "content": "[13] C. Bishop, Pattern recognition and machine learning. Springer, 2006. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 190, + 566, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 190, + 566, + 217 + ], + "spans": [ + { + "bbox": [ + 311, + 190, + 566, + 217 + ], + "type": "text", + "content": "[14] T. Hastie, R. Tibshirani, and J. H. Friedman, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, 2nd Edition. Springer, 2009. 1, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 217, + 566, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 217, + 566, + 236 + ], + "spans": [ + { + "bbox": [ + 311, + 217, + 566, + 236 + ], + "type": "text", + "content": "[15] M. Mohri, A. Rostamizadeh, and A. Talwalkar, Foundations of Machine Learning. MIT Press, 2012. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 236, + 566, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 236, + 566, + 262 + ], + "spans": [ + { + "bbox": [ + 311, + 236, + 566, + 262 + ], + "type": "text", + "content": "[16] K. P. 
Murphy, Probabilistic Machine Learning: An introduction, ser. Adaptive computation and machine learning series. MIT Press, 2022. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 262, + 566, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 262, + 566, + 290 + ], + "spans": [ + { + "bbox": [ + 311, + 262, + 566, + 290 + ], + "type": "text", + "content": "[17] A. Voulodimos, N. Doulamis, A. Doulamis, E. Protopapadakis et al., \"Deep learning for computer vision: A brief review,\" Computational intelligence and neuroscience, vol. 2018, 2018. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 290, + 564, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 290, + 564, + 327 + ], + "spans": [ + { + "bbox": [ + 311, + 290, + 564, + 327 + ], + "type": "text", + "content": "[18] D. W. Otter, J. R. Medina, and J. K. Kalita, \"A survey of the usages of deep learning for natural language processing,\" IEEE transactions on neural networks and learning systems, vol. 32, no. 2, pp. 604-624, 2020. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 327, + 564, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 327, + 564, + 361 + ], + "spans": [ + { + "bbox": [ + 311, + 327, + 564, + 361 + ], + "type": "text", + "content": "[19] Y. Bengio, A. Courville, and P. Vincent, \"Representation learning: A review and new perspectives,\" IEEE transactions on pattern analysis and machine intelligence, vol. 35, no. 8, pp. 1798-1828, 2013. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 363, + 564, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 363, + 564, + 380 + ], + "spans": [ + { + "bbox": [ + 311, + 363, + 564, + 380 + ], + "type": "text", + "content": "[20] Y. LeCun, Y. Bengio, and G. Hinton, \"Deep learning,\" nature, vol. 521, no. 7553, pp. 436-444, 2015. 
1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 311, + 381, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 381, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 311, + 381, + 564, + 399 + ], + "type": "text", + "content": "[21] I. Goodfellow, Y. Bengio, and A. Courville, Deep learning. MIT press, 2016. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 311, + 399, + 564, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 399, + 564, + 426 + ], + "spans": [ + { + "bbox": [ + 311, + 399, + 564, + 426 + ], + "type": "text", + "content": "[22] J. Donahue, Y. Jia, O. Vinyals, J. Hoffman, N. Zhang, E. Tzeng, and T. Darrell, \"Decaf: A deep convolutional activation feature for generic visual recognition,\" in ICML, 2014, pp. 647-655. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 311, + 426, + 566, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 426, + 566, + 453 + ], + "spans": [ + { + "bbox": [ + 311, + 426, + 566, + 453 + ], + "type": "text", + "content": "[23] G. E. Hinton and R. R. Salakhutdinov, \"Reducing the dimensionality of data with neural networks,\" science, vol. 313, no. 5786, pp. 504-507, 2006. 2, 4" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 311, + 454, + 564, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 454, + 564, + 472 + ], + "spans": [ + { + "bbox": [ + 311, + 454, + 564, + 472 + ], + "type": "text", + "content": "[24] J. Weston, F. Ratle, and R. Collobert, \"Deep learning via semi-supervised embedding,\" in ICML, 2008, pp. 1168-1175. 2, 4" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 311, + 472, + 564, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 472, + 564, + 498 + ], + "spans": [ + { + "bbox": [ + 311, + 472, + 564, + 498 + ], + "type": "text", + "content": "[25] L. 
Van Der Maaten, \"Learning a parametric embedding by preserving local structure,\" in AISTATS, 2009, pp. 384-391. 2, 4" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 311, + 499, + 564, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 499, + 564, + 526 + ], + "spans": [ + { + "bbox": [ + 311, + 499, + 564, + 526 + ], + "type": "text", + "content": "[26] M. R. Min, L. Maaten, Z. Yuan, A. J. Bonner, and Z. Zhang, \"Deep supervised t-distributed embedding,\" in ICML, 2010, pp. 791-798. 2, 4" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 311, + 526, + 564, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 526, + 564, + 554 + ], + "spans": [ + { + "bbox": [ + 311, + 526, + 564, + 554 + ], + "type": "text", + "content": "[27] W. Zhang, T. Du, and J. Wang, \"Deep learning over multi-field categorical data -- A case study on user response prediction,\" in ECIR, 2016, pp. 45-57. 2, 4" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 311, + 554, + 564, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 554, + 564, + 591 + ], + "spans": [ + { + "bbox": [ + 311, + 554, + 564, + 591 + ], + "type": "text", + "content": "[28] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah, \"Wide & deep learning for recommender systems,\" in DLRS, 2016, pp. 7-10. 2, 4, 19" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 311, + 591, + 564, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 591, + 564, + 609 + ], + "spans": [ + { + "bbox": [ + 311, + 591, + 564, + 609 + ], + "type": "text", + "content": "[29] K. G. Mehrotra, C. K. Mohan, H. Huang, K. G. Mehrotra, C. K. Mohan, and H. Huang, Anomaly detection. Springer, 2017. 
2, 4" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 311, + 609, + 564, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 609, + 564, + 635 + ], + "spans": [ + { + "bbox": [ + 311, + 609, + 564, + 635 + ], + "type": "text", + "content": "[30] F. O. Isinkaye, Y. O. Folajimi, and B. A. Ojokoh, \"Recommendation systems: Principles, methods and evaluation,\" Egyptian informatics journal, vol. 16, no. 3, pp. 261-273, 2015. 2, 4" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 311, + 636, + 564, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 636, + 564, + 664 + ], + "spans": [ + { + "bbox": [ + 311, + 636, + 564, + 664 + ], + "type": "text", + "content": "[31] S. S. Rangapuram, M. W. Seeger, J. Gasthaus, L. Stella, Y. Wang, and T. Januschowski, \"Deep state space models for time series forecasting,\" in NeurIPS, 2018, pp. 7796-7805. 2, 4" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 311, + 664, + 564, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 664, + 564, + 690 + ], + "spans": [ + { + "bbox": [ + 311, + 664, + 564, + 690 + ], + "type": "text", + "content": "[32] B. Lim and S. Zohren, \"Time-series forecasting with deep learning: a survey,\" Philosophical Transactions of the Royal Society A, vol. 379, no. 2194, p. 20200209, 2021. 2, 4" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 311, + 690, + 564, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 690, + 564, + 718 + ], + "spans": [ + { + "bbox": [ + 311, + 690, + 564, + 718 + ], + "type": "text", + "content": "[33] Y. Gorishniy, I. Rubachev, V. Khrulkov, and A. Babenko, \"Revisiting deep learning models for tabular data,\" in NeurIPS, 2021, pp. 18932-18943. 
2, 3, 4, 6, 7, 8, 9, 11, 12, 19, 20" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 311, + 718, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 718, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 311, + 718, + 564, + 746 + ], + "type": "text", + "content": "[34] D. Holzmüller, L. Grinsztajn, and I. Steinwart, \"Better by default: Strong pre-tuned mlp's and boosted trees on tabular data,\" in NeurIPS, 2024, pp. 26577-26658. 2, 4, 5, 7, 9, 12" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "type": "text", + "content": "[35] H.-J. Ye, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Revisiting nearest neighbor for tabular data: A deep tabular baseline two decades later,\" in ICLR, 2025. 
2, 3, 4, 9, 10, 21" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "text", + "content": "[36] L. Grinsztajn, E. Oyallon, and G. Varoquaux, \"Why do tree-based models still outperform deep learning on typical tabular data?\" in NeurIPS, 2022, pp. 507-520. 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 118 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 118 + ], + "type": "text", + "content": "[37] R. Shwartz-Ziv and A. Armon, \"Tabular data: Deep learning is not all you need,\" Information Fusion, vol. 81, pp. 84-90, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 118, + 301, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 118, + 301, + 144 + ], + "spans": [ + { + "bbox": [ + 47, + 118, + 301, + 144 + ], + "type": "text", + "content": "[38] E. Beyazit, J. Kozaczuk, B. Li, V. Wallace, and B. Fadlallah, \"An inductive bias for tabular deep learning,\" in NeurIPS, 2023, pp. 43108-43135. 2, 7, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 145, + 301, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 145, + 301, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 145, + 301, + 182 + ], + "type": "text", + "content": "[39] D. C. McElfresh, S. Khandagale, J. Valverde, V. P. C., G. Ramakrishnan, M. Goldblum, and C. White, \"When do neural nets outperform boosted trees on tabular data?\" in NeurIPS, 2023, pp. 76336-76369. 
2, 5, 6, 7, 8, 18" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 182, + 301, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 301, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 301, + 218 + ], + "type": "text", + "content": "[40] H.-J. Ye, D.-C. Zhan, N. Li, and Y. Jiang, \"Learning multiple local metrics: Global consideration helps,\" IEEE transactions on pattern analysis and machine intelligence, vol. 42, no. 7, pp. 1698-1712, 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 219, + 301, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 219, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 47, + 219, + 301, + 255 + ], + "type": "text", + "content": "[41] S. M. Jesus, J. Pombal, D. Alves, A. F. Cruz, P. Saleiro, R. P. Ribeiro, J. Gama, and P. Bizarro, \"Turning the tables: Biased, imbalanced, dynamic tabular datasets for ML evaluation,\" in NeurIPS, 2022, pp. 33563-33575. 2, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 256, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 256, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 256, + 301, + 284 + ], + "type": "text", + "content": "[42] R. Kohli, M. Feurer, K. Eggensperger, B. Bischl, and F. Hutter, \"Towards quantifying the effect of datasets for benchmarking: A look at tabular machine learning,\" in ICLR Workshop, 2024. 2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "type": "text", + "content": "[43] A. Tschalzev, S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"A data-centric perspective on evaluating machine learning models for tabular data,\" in NeurIPS Datasets and Benchmarks Track, 2024. 
2, 6, 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 320, + 301, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 301, + 348 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 301, + 348 + ], + "type": "text", + "content": "[44] H.-J. Ye, S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and D.-C. Zhan, \"A closer look at deep learning on tabular data,\" CoRR, vol. abs/2407.00956, 2024. 2, 6, 7, 8, 21" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 348, + 301, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 348, + 301, + 375 + ], + "spans": [ + { + "bbox": [ + 47, + 348, + 301, + 375 + ], + "type": "text", + "content": "[45] Y. Gorishniy, I. Rubachev, and A. Babenko, \"On embeddings for numerical features in tabular deep learning,\" in NeurIPS, 2022, pp. 24991-25004. 2, 4, 8, 9, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 376, + 301, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 301, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 301, + 403 + ], + "type": "text", + "content": "[46] T. Ucar, E. Hajiramezanali, and L. Edwards, \"Subtab: Subsetting features of tabular data for self-supervised representation learning,\" in NeurIPS, 2021, pp. 18853-18865. 2, 9, 14" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 404, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 404, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 404, + 301, + 430 + ], + "type": "text", + "content": "[47] D. Bahri, H. Jiang, Y. Tay, and D. Metzler, \"Scarf: Self-supervised contrastive learning using random feature corruption,\" in ICLR, 2022. 
2, 9, 14" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 431, + 301, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 431, + 301, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 431, + 301, + 459 + ], + "type": "text", + "content": "[48] J. Yoon, Y. Zhang, J. Jordon, and M. van der Schaar, \"VIME: extending the success of self- and semi-supervised learning to tabular domain,\" in NeurIPS, 2020, pp. 11.033-11.043. 2, 9, 13, 14" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 459, + 301, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 301, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 301, + 495 + ], + "type": "text", + "content": "[49] J. Wu, S. Chen, Q. Zhao, R. Sergazinov, C. Li, S. Liu, C. Zhao, T. Xie, H. Guo, C. Ji, D. Cociorva, and H. Brunzell, \"Switchtab: Switched autoencoders are effective tabular learners,\" in AAAI, 2024, pp. 15924-15933. 2, 7, 9, 13" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 496, + 301, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 496, + 301, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 496, + 301, + 523 + ], + "type": "text", + "content": "[50] A. Kadra, M. Lindauer, F. Hutter, and J. Grabocka, \"Well-tuned simple nets excel on tabular datasets,\" in NeurIPS, 2021, pp. 23928-23941. 2, 4, 6, 9, 10, 12" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 524, + 301, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 524, + 301, + 542 + ], + "spans": [ + { + "bbox": [ + 47, + 524, + 301, + 542 + ], + "type": "text", + "content": "[51] R. Wang, B. Fu, G. Fu, and M. Wang, \"Deep & cross network for ad click predictions,\" in ADKDD, 2017, pp. 1-7. 
2, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 543, + 301, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 543, + 301, + 569 + ], + "spans": [ + { + "bbox": [ + 47, + 543, + 301, + 569 + ], + "type": "text", + "content": "[52] G. Klambauer, T. Unterthiner, A. Mayr, and S. Hochreiter, \"Self-normalizing neural networks,\" in NIPS, 2017, pp. 971-980. 2, 9, 12" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 570, + 301, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 570, + 301, + 589 + ], + "spans": [ + { + "bbox": [ + 47, + 570, + 301, + 589 + ], + "type": "text", + "content": "[53] G. Ke, J. Zhang, Z. Xu, J. Bian, and T.-Y. Liu, \"Tabnn: A universal neural network solution for tabular data,\" 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 589, + 301, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 589, + 301, + 625 + ], + "spans": [ + { + "bbox": [ + 47, + 589, + 301, + 625 + ], + "type": "text", + "content": "[54] R. Wang, R. Shivanna, D. Z. Cheng, S. Jain, D. Lin, L. Hong, and E. H. Chi, \"DCN V2: improved deep & cross network and practical lessons for web-scale learning to rank systems,\" in WWW, 2021, pp. 1785-1797. 2, 7, 9, 12" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 625, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 625, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 625, + 301, + 654 + ], + "type": "text", + "content": "[55] J. Chen, K. Liao, Y. Wan, D. Z. Chen, and J. Wu, \"Danets: Deep abstract networks for tabular data classification and regression,\" in AAAI, 2022, pp. 3930-3938. 
2, 9, 13" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 654, + 301, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 681 + ], + "type": "text", + "content": "[56] J. Chen, K. Liao, Y. Fang, D. Chen, and J. Wu, \"Tabcaps: A capsule neural network for tabular data classification with bow routing,\" in ICLR, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 681, + 301, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 681, + 301, + 709 + ], + "spans": [ + { + "bbox": [ + 47, + 681, + 301, + 709 + ], + "type": "text", + "content": "[57] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in KDD, 2024, pp. 3679-3689. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 709, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 709, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 709, + 301, + 746 + ], + "type": "text", + "content": "[58] C. Xu, Y.-C. Huang, J. Y.-C. Hu, W. Li, A. Gilani, H.-S. Goan, and H. Liu, \"Bishop: Bi-directional cellular learning for tabular data with generalized sparse modern hopfield model,\" in ICML, 2024, pp. 55048-55075. 2, 7, 9, 12" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 565, + 745 + ], + "type": "list", + "angle": 0, + "index": 51, + "blocks": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "spans": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "text", + "content": "[59] S. Badirli, X. Liu, Z. Xing, A. Bhowmik, and S. S. Keerthi, \"Gradient boosting neural networks: Grownet,\" CoRR, vol. abs/2002.07971, 2020. 
2, 7, 8, 9, 12, 19" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "text", + "content": "[60] S. Popov, S. Morozov, and A. Babenko, “Neural oblivious decision ensembles for deep learning on tabular data,” in ICLR, 2020. 2, 8, 9, 12, 19, 20" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 99, + 565, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 99, + 565, + 127 + ], + "spans": [ + { + "bbox": [ + 310, + 99, + 565, + 127 + ], + "type": "text", + "content": "[61] C.-H. Chang, R. Caruana, and A. Goldenberg, \"NODE-GAM: neural generalized additive model for interpretable deep learning,\" in ICLR, 2022. 2, 3, 8, 9, 12, 20" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 128, + 565, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 128, + 565, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 128, + 565, + 156 + ], + "type": "text", + "content": "[62] W. Song, C. Shi, Z. Xiao, Z. Duan, Y. Xu, M. Zhang, and J. Tang, \"Autoint: Automatic feature interaction learning via self-attentive neural networks,\" in CIKM, 2019, pp. 1161-1170. 3, 7, 9, 11, 13" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "spans": [ + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "type": "text", + "content": "[63] X. Huang, A. Khetan, M. Cvitkovic, and Z. S. Karnin, \"Tabransformer: Tabular data modeling using contextual embeddings,\" CoRR, vol. abs/2012.06678, 2020. 
3, 7, 8, 9, 11, 13, 14, 19, 20" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 185, + 565, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 185, + 565, + 212 + ], + "spans": [ + { + "bbox": [ + 310, + 185, + 565, + 212 + ], + "type": "text", + "content": "[64] Q.-L. Zhou, H.-J. Ye, L. Wang, and D.-C. Zhan, \"Unlocking the transferability of tokens in deep models for tabular data,\" CoRR, vol. abs/2310.15149, 2023. 3, 9, 13, 15" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 213, + 565, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 213, + 565, + 240 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 565, + 240 + ], + "type": "text", + "content": "[65] J. Chen, J. Yan, Q. Chen, D. Z. Chen, J. Wu, and J. Sun, \"Can a deep learning model be a sure bet for tabular prediction?\" in KDD, 2024, pp. 288-296. 3, 7, 8, 9, 12, 13" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 241, + 565, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 241, + 565, + 269 + ], + "spans": [ + { + "bbox": [ + 310, + 241, + 565, + 269 + ], + "type": "text", + "content": "[66] A. Jeffares, T. Liu, J. Crabbé, F. Imrie, and M. van der Schaar, \"Tangos: Regularizing tabular neural networks through gradient orthogonalization and specialization,\" in ICLR, 2023. 3, 9, 10" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 270, + 565, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 270, + 565, + 297 + ], + "spans": [ + { + "bbox": [ + 310, + 270, + 565, + 297 + ], + "type": "text", + "content": "[67] H. Ye, W. Fan, X. Song, S. Zheng, H. Zhao, D. dan Guo, and Y. Chang, \"Ptarl: Prototype-based tabular representation learning via space calibration,\" in ICLR, 2024. 
3, 9, 10" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 297, + 565, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 297, + 565, + 316 + ], + "spans": [ + { + "bbox": [ + 310, + 297, + 565, + 316 + ], + "type": "text", + "content": "[68] Y. Nader, L. Sixt, and T. Landgraf, \"DNNR: differential nearest neighbors regression,\" in ICML, 2022, pp. 16296-16317. 3, 7, 9, 10" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 316, + 565, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 316, + 565, + 344 + ], + "spans": [ + { + "bbox": [ + 310, + 316, + 565, + 344 + ], + "type": "text", + "content": "[69] Y. Gorishniy, I. Rubachev, N. Kartashev, D. Shlenskii, A. Kotelnikov, and A. Babenko, \"Tabr: Tabular deep learning meets nearest neighbors in 2023,\" in ICLR, 2024. 3, 6, 7, 9, 10, 19" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 345, + 565, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 345, + 565, + 381 + ], + "spans": [ + { + "bbox": [ + 310, + 345, + 565, + 381 + ], + "type": "text", + "content": "[70] G. Somepalli, A. Schwarzschild, M. Goldblum, C. B. Bruss, and T. Goldstein, \"SAINT: Improved neural networks for tabular data via row attention and contrastive pre-training,\" in NeurIPS Workshop, 2022. 3, 7, 9, 10, 11, 13, 14" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 382, + 565, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 382, + 565, + 409 + ], + "spans": [ + { + "bbox": [ + 310, + 382, + 565, + 409 + ], + "type": "text", + "content": "[71] I. Rubachev, A. Alekberov, Y. Gorishniy, and A. Babenko, \"Revisiting pretraining objectives for tabular deep learning,\" CoRR, vol. abs/2207.03208, 2022. 
3, 7, 13, 14" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "spans": [ + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "type": "text", + "content": "[72] S. Onishi, K. Oono, and K. Hayashi, \"Tabret: Pre-training transformer-based tabular models for unseen columns,\" CoRR, vol. abs/2303.15747, 2023. 3, 9, 12, 15" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "spans": [ + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "type": "text", + "content": "[73] J. Shen, L. Li, L. M. Dery, C. Staten, M. Khodak, G. Neubig, and A. Talwalkar, \"Cross-modal fine-tuning: Align then refine,\" in ICML, 2023, pp. 31030-31056. 3, 9, 13, 15" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 466, + 565, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 466, + 565, + 502 + ], + "spans": [ + { + "bbox": [ + 310, + 466, + 565, + 502 + ], + "type": "text", + "content": "[74] Y. Zhu, T. Brettin, F. Xia, A. Partin, M. Shukla, H. Yoo, Y. A. Evrard, J. H. Doroshow, and R. L. Stevens, \"Converting tabular data into images for deep learning with convolutional neural networks,\" Scientific Reports, vol. 11, no. 11325, 2021. 3, 4, 9, 17" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 503, + 565, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 503, + 565, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 503, + 565, + 521 + ], + "type": "text", + "content": "[75] S. Lee and S.-C. Lee, \"Tableye: Seeing small tables through the lens of images,\" CoRR, vol. abs/2307.02491, 2023. 
3, 9, 17" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 522, + 565, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 522, + 565, + 558 + ], + "spans": [ + { + "bbox": [ + 310, + 522, + 565, + 558 + ], + "type": "text", + "content": "[76] A. Mamdouh, M. El-Melegy, S. Ali, and R. Kikinis, \"Tab2visual: Overcoming limited data in tabular data classification using deep learning with visual representations,\" CoRR, vol. abs/2502.07181, 2025.3,9,17" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "type": "text", + "content": "[77] Z. Wang and J. Sun, \"Transtab: Learning transferable tabular transformers across tables,\" in NeurIPS, 2022, pp. 2902-2915. 3, 9, 13, 16" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 587, + 565, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 587, + 565, + 615 + ], + "spans": [ + { + "bbox": [ + 310, + 587, + 565, + 615 + ], + "type": "text", + "content": "[78] J. Yan, B. Zheng, H. Xu, Y. Zhu, D. Z. Chen, J. Sun, J. Wu, and J. Chen, \"Making pre-trained language models great on tabular prediction,\" in ICLR, 2024. 3, 6, 9, 16, 17" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 616, + 565, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 616, + 565, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 616, + 565, + 643 + ], + "type": "text", + "content": "[79] C. Ye, G. Lu, H. Wang, L. Li, S. Wu, G. Chen, and J. Zhao, \"Towards cross-table masked pretraining for web data mining,\" in WWW, 2024, pp. 4449-4459. 
3, 6, 9, 16" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 644, + 565, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 644, + 565, + 680 + ], + "spans": [ + { + "bbox": [ + 310, + 644, + 565, + 680 + ], + "type": "text", + "content": "[80] S. Hegselmann, A. Buendia, H. Lang, M. Agrawal, X. Jiang, and D. Sontag, \"Tabllm: few-shot classification of tabular data with large language models,\" in AISTATS, 2023, pp. 5549-5581. 3, 9, 13, 16, 17" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 681, + 565, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 681, + 565, + 708 + ], + "spans": [ + { + "bbox": [ + 310, + 681, + 565, + 708 + ], + "type": "text", + "content": "[81] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 3, 6" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 709, + 565, + 745 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 709, + 565, + 745 + ], + "spans": [ + { + "bbox": [ + 310, + 709, + 565, + 745 + ], + "type": "text", + "content": "[82] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing CAAFE for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 3, 9" + } + ] + } + ], + "index": 50 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 302, + 746 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 47, + 43, + 302, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 43, + 302, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 302, + 71 + ], + "type": "text", + "content": "[83] S. Han, J. Yoon, S. Ö. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 3, 9" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 109 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 109 + ], + "type": "text", + "content": "[84] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024. 3, 17" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 110, + 301, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 110, + 301, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 110, + 301, + 137 + ], + "type": "text", + "content": "[85] Y. Liang, H. Wen, Y. Nie, Y. Jiang, M. Jin, D. Song, S. Pan, and Q. Wen, \"Foundation models for time series analysis: A tutorial and survey,\" in SIGKDD, 2024, pp. 6555-6565. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "spans": [ + { + "bbox": [ + 47, + 137, + 301, + 164 + ], + "type": "text", + "content": "[86] H.-J. Ye, Q.-L. Zhou, H.-H. Yin, D.-C. Zhan, and W.-L. Chao, \"Rethinking pre-training in tabular data: A neighborhood embedding perspective,\" CoRR, vol. abs/2311.00055, 2025. 3, 9, 18" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 165, + 301, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 165, + 301, + 193 + ], + "spans": [ + { + "bbox": [ + 47, + 165, + 301, + 193 + ], + "type": "text", + "content": "[87] D. Bonet, D. M. Montserrat, X. G. i Nieto, and A. G. Ioannidis, \"Hyperfast: Instant classification for tabular data,\" in AAAI, 2024, pp. 11 114-11 123. 3, 7, 9, 18" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "spans": [ + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "type": "text", + "content": "[88] A. Müller, C. Curino, and R. Ramakrishnan, \"Mothernet: Fast training and inference via hyper-network transformers,\" in ICLR, 2025. 3, 8, 9, 18" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 221, + 301, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 221, + 301, + 248 + ], + "spans": [ + { + "bbox": [ + 47, + 221, + 301, + 248 + ], + "type": "text", + "content": "[89] N. Hollmann, S. Müller, K. Eggensperger, and F. Hutter, \"Tabpfn: A transformer that solves small tabular classification problems in a second,\" in ICLR, 2023. 
3, 6, 7, 8, 9, 10, 16, 18, 19" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 249, + 301, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 249, + 301, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 249, + 301, + 277 + ], + "type": "text", + "content": "[90] V. Thomas, J. Ma, R. Hosseinzadeh, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Retrieval & fine-tuning for in-context tabular models,\" in NeurIPS, 2024, pp. 108439-108467. 3, 10, 19" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 277, + 301, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 277, + 301, + 314 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 301, + 314 + ], + "type": "text", + "content": "[91] N. Hollmann, S. Müller, L. Purucker, A. Krishnakumar, M. Körfer, S. B. Hoo, R. T. Schirrmeister, and F. Hutter, \"Accurate predictions on small data with a tabular foundation model,\" Nature, vol. 637, no. 8045, pp. 319-326, 2025. 3, 9, 10, 18, 19, 20" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 315, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 315, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 315, + 301, + 342 + ], + "type": "text", + "content": "[92] J. Gardner, J. C. Perdomo, and L. Schmidt, \"Large scale transfer learning for tabular data via language modeling,\" in NeurIPS, 2024, pp. 45155-45205. 3, 6, 9, 19" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 342, + 301, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 301, + 378 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 301, + 378 + ], + "type": "text", + "content": "[93] X. Wen, H. Zhang, S. Zheng, W. Xu, and J. Bian, \"From supervised to generative: A novel paradigm for tabular deep learning with large language models,\" in SIGKDD, 2024, pp. 3323-3333. 
3, 9, 17, 19" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "text", + "content": "[94] X. Wen, S. Zheng, Z. Xu, Y. Sun, and J. Bian, \"Scalable in-context learning on tabular data via retrieval-augmented large language models,\" CoRR, vol. abs/2502.03147, 2025. 3, 9, 19" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 407, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 301, + 435 + ], + "type": "text", + "content": "[95] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" CoRR, vol. abs/2410.24210, 2024. 3, 20, 21" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 436, + 301, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 436, + 301, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 436, + 301, + 464 + ], + "type": "text", + "content": "[96] S.-Y. Liu and H.-J. Ye, \"Tabpfn unleashed: A scalable and effective solution to tabular classification problems,\" CoRR, vol. abs/2502.02527, 2025. 3, 17, 19, 20" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 464, + 301, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 464, + 301, + 483 + ], + "spans": [ + { + "bbox": [ + 47, + 464, + 301, + 483 + ], + "type": "text", + "content": "[97] J. Svirsky and O. Lindenbaum, \"Interpretable deep clustering for tabular data,\" in ICML, 2024, pp. 47314-47330. 
3, 20" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 483, + 301, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 483, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 483, + 301, + 502 + ], + "type": "text", + "content": "[98] H. T. Rauf, A. Freitas, and N. W. Paton, \"Tabledc: Deep clustering for tabular data,\" CoRR, vol. abs/2405.17723, 2024. 3, 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 502, + 301, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 301, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 301, + 529 + ], + "type": "text", + "content": "[99] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" in NeurIPS, 2022, pp. 32142-32159. 3, 20" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 530, + 301, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 530, + 301, + 549 + ], + "spans": [ + { + "bbox": [ + 47, + 530, + 301, + 549 + ], + "type": "text", + "content": "[100] T. Shenkar and L. Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 550, + 301, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 568 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 568 + ], + "type": "text", + "content": "[101] J. Yin, Y. Qiao, Z. Zhou, X. Wang, and J. Yang, \"MCM: masked cell modeling for anomaly detection in tabular data,\" in ICLR, 2024. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 569, + 301, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 301, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 301, + 605 + ], + "type": "text", + "content": "[102] L. Hansen, N. Seedat, M. van der Schaar, and A. 
Petrovic, \"Reimagining synthetic tabular data generation through data-centric AI: A comprehensive benchmark,\" in NeurIPS, 2023, pp. 33781-33823. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 605, + 301, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 301, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 301, + 642 + ], + "type": "text", + "content": "[103] C. Hou, S. Gu, C. Xu, and Y. Qian, \"Incremental learning for simultaneous augmentation of feature and class,\" IEEE Transactions on pattern analysis and machine intelligence, vol. 45, no. 12, pp. 14789-14806, 2023. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 643, + 301, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 670 + ], + "type": "text", + "content": "[104] M. Vero, M. Balunovic, and M. T. Vechev, \"Cuts: Customizable tabular synthetic data generation,\" in ICML, 2024, pp. 49408-49433. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 670, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 301, + 689 + ], + "type": "text", + "content": "[105] S. Ö. Arik and T. Pfister, \"Tabnet: Attentive interpretable tabular learning,\" in AAAI, 2021, pp. 6679-6687. 3, 7, 8, 9, 12, 20" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 689, + 301, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 718 + ], + "type": "text", + "content": "[106] P. Hager, M. J. Menten, and D. Rueckert, \"Best of both worlds: Multimodal contrastive learning with tabular and imaging data,\" in CVPR, 2023, pp. 23924-23935. 
3, 7, 21" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "text", + "content": "[107] J.-P. Jiang, H.-J. Ye, L. Wang, Y. Yang, Y. Jiang, and D.-C. Zhan, \"Tabular insights, visual impacts: Transferring expertise from tables to images,\" in ICML, 2024, pp. 21988-22009. 3, 7, 21" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 565, + 746 + ], + "type": "list", + "angle": 0, + "index": 56, + "blocks": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "spans": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "text", + "content": "[108] Y. Diao, Y. Yang, Q. Li, B. He, and M. Lu, \"Oebench: Investigating open environment challenges in real-world relational data streams,\" VLDB, vol. 17, no. 6, pp. 1283-1296, 2024. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "text", + "content": "[109] I. Rubachev, N. Kartashev, Y. Gorishniy, and A. Babenko, \"Tabred: A benchmark of tabular machine learning in-the-wild,\" CoRR, vol. abs/2406.19380, 2024. 3, 6, 8, 20, 21" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 99, + 565, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 99, + 565, + 125 + ], + "spans": [ + { + "bbox": [ + 310, + 99, + 565, + 125 + ], + "type": "text", + "content": "[110] J. Gardner, Z. Popovic, and L. Schmidt, \"Benchmarking distribution shift in tabular data with tableshift,\" in NeurIPS, 2024, pp. 53385-53432. 
3, 20, 21" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 126, + 565, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 126, + 565, + 153 + ], + "spans": [ + { + "bbox": [ + 310, + 126, + 565, + 153 + ], + "type": "text", + "content": "[111] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 2024. 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 154, + 565, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 154, + 565, + 173 + ], + "spans": [ + { + "bbox": [ + 310, + 154, + 565, + 173 + ], + "type": "text", + "content": "[112] N. Jin, J. Siebert, D. Li, and Q. Chen, \"A survey on table question answering: recent advances,\" in CCKS, 2022, pp. 174-186. 3, 21" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 173, + 565, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 173, + 565, + 209 + ], + "spans": [ + { + "bbox": [ + 310, + 173, + 565, + 209 + ], + "type": "text", + "content": "[113] X. Fang, W. Xu, F. A. Tan, J. Zhang, Z. Hu, Y. Qi, S. Nickleach, D. Socolinsky, S. Sengamedu, and C. Faloutsos, \"Large language models (llms) on tabular data: Prediction, generation, and understanding-a survey,\" CoRR, vol. abs/2402.17944, 2024. 3, 21" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 210, + 565, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 210, + 565, + 236 + ], + "spans": [ + { + "bbox": [ + 310, + 210, + 565, + 236 + ], + "type": "text", + "content": "[114] C. Winship and R. D. Mare, \"Regression models with ordinal variables,\" American sociological review, vol. 49, no. 4, pp. 512-525, 1984. 
3" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 237, + 565, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 237, + 565, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 237, + 565, + 274 + ], + "type": "text", + "content": "[115] P. A. Gutierrez, M. Perez-Ortiz, J. Sánchez-Monedero, F. Fernández-Navarro, and C. Hervás-Martínez, \"Ordinal regression methods: Survey and experimental study,\" IEEE Trans. Knowl. Data Eng., vol. 28, no. 1, pp. 127-146, 2016. 3" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 274, + 565, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 565, + 310 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 565, + 310 + ], + "type": "text", + "content": "[116] A. Jeffares, A. Curth, and M. van der Schaar, \"Deep learning through A telescoping lens: A simple model provides empirical insights on grokking, gradient boosting & beyond,\" in NeurIPS, 2024, pp. 123-498-123-533. 4" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 311, + 565, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 311, + 565, + 338 + ], + "spans": [ + { + "bbox": [ + 310, + 311, + 565, + 338 + ], + "type": "text", + "content": "[117] G. Cormode, P. Indyk, N. Koudas, and S. Muthukrishnan, \"Fast mining of massive tabular data via approximate distance computations,\" in ICDE, 2002, pp. 605-614. 4" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 339, + 565, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 339, + 565, + 357 + ], + "spans": [ + { + "bbox": [ + 310, + 339, + 565, + 357 + ], + "type": "text", + "content": "[118] M. D. Adelfio and H. Samet, \"Schema extraction for tabular data on the web,\" VLDB, vol. 6, no. 6, pp. 421-432, 2013. 
4" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 357, + 565, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 357, + 565, + 375 + ], + "spans": [ + { + "bbox": [ + 310, + 357, + 565, + 375 + ], + "type": "text", + "content": "[119] J. F. Arias, A. K. Chhabra, and V. Misra, \"Efficient interpretation of tabular documents,\" in ICPR, 1996, pp. 681-685. 4" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 376, + 565, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 565, + 403 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 565, + 403 + ], + "type": "text", + "content": "[120] H.-L. Wang, S.-H. Wu, K. K. Wang, C.-L. Sung, W.-L. Hsu, and W.-K. Shih, \"Semantic search on internet tabular information extraction for answering queries,\" in CIKM, 2000, pp. 243-249. 4" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 403, + 565, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 403, + 565, + 422 + ], + "spans": [ + { + "bbox": [ + 310, + 403, + 565, + 422 + ], + "type": "text", + "content": "[121] M.-J. Nederhof, \"An optimal tabular parsing algorithm,\" in ACL, 1994, pp. 117-124. 4" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 422, + 565, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 422, + 565, + 449 + ], + "spans": [ + { + "bbox": [ + 310, + 422, + 565, + 449 + ], + "type": "text", + "content": "[122] J. F. Arias, A. K. Chhabra, and V. Misra, \"Interpreting and representing tabular documents,\" in CVPR, 1996, pp. 600-605. 4" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 449, + 565, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 449, + 565, + 468 + ], + "spans": [ + { + "bbox": [ + 310, + 449, + 565, + 468 + ], + "type": "text", + "content": "[123] G. Richards and V. J. 
Rayward-Smith, \"Discovery of association rules in tabular data,\" in ICDM, 2001, pp. 465-472. 4" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 468, + 565, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 468, + 565, + 486 + ], + "spans": [ + { + "bbox": [ + 310, + 468, + 565, + 486 + ], + "type": "text", + "content": "[124] J. R. Quinlan, \"Induction of decision trees,\" Machine learning, vol. 1, pp. 81-106, 1986. 4" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 487, + 565, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 487, + 565, + 506 + ], + "spans": [ + { + "bbox": [ + 310, + 487, + 565, + 506 + ], + "type": "text", + "content": "[125] L. Breiman, J. Friedman, R. Olshen, and C. J. Stone, Classification and Regression Trees. Chapman and Hall/CRC, 1984. 4" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 506, + 565, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 506, + 565, + 533 + ], + "spans": [ + { + "bbox": [ + 310, + 506, + 565, + 533 + ], + "type": "text", + "content": "[126] Y. Freund and R. E. Schapire, “A desicion-theoretic generalization of on-line learning and an application to boosting,” in EuroCOLT, 1995, pp. 23-37. 4, 19" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 533, + 565, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 533, + 565, + 551 + ], + "spans": [ + { + "bbox": [ + 310, + 533, + 565, + 551 + ], + "type": "text", + "content": "[127] L. Breiman, \"Random forests,\" Machine Learning, vol. 45, no. 1, pp. 5-32, 2001. 4, 19" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 552, + 565, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 552, + 565, + 570 + ], + "spans": [ + { + "bbox": [ + 310, + 552, + 565, + 570 + ], + "type": "text", + "content": "[128] J. H. 
Friedman, \"Greedy function approximation: a gradient boosting machine,\" Annals of statistics, pp. 1189-1232, 2001. 4" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 571, + 565, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 571, + 565, + 589 + ], + "spans": [ + { + "bbox": [ + 310, + 571, + 565, + 589 + ], + "type": "text", + "content": "[129] ——, \"Stochastic gradient boosting,\" Computational statistics & data analysis, vol. 38, no. 4, pp. 367-378, 2002. 4" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 589, + 565, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 589, + 565, + 607 + ], + "spans": [ + { + "bbox": [ + 310, + 589, + 565, + 607 + ], + "type": "text", + "content": "[130] T. Chen and C. Guestrin, \"Xgboost: A scalable tree boosting system,\" in KDD, 2016, pp. 785-794. 4, 8, 18, 20" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 607, + 565, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 607, + 565, + 635 + ], + "spans": [ + { + "bbox": [ + 310, + 607, + 565, + 635 + ], + "type": "text", + "content": "[131] G. Ke, Q. Meng, T. Finley, T. Wang, W. Chen, W. Ma, Q. Ye, and T.-Y. Liu, \"Lightgbm: A highly efficient gradient boosting decision tree,\" in NIPS, 2017, pp. 3146-3154. 4, 8, 20" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 635, + 565, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 635, + 565, + 662 + ], + "spans": [ + { + "bbox": [ + 310, + 635, + 565, + 662 + ], + "type": "text", + "content": "[132] L. O. Prokhorenkova, G. Gusev, A. Vorobev, A. V. Dorogush, and A. Gulin, \"Catboost: unbiased boosting with categorical features,\" in NeurIPS, 2018, pp. 6639-6649. 
4, 8" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 663, + 565, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 663, + 565, + 689 + ], + "spans": [ + { + "bbox": [ + 310, + 663, + 565, + 689 + ], + "type": "text", + "content": "[133] D. Nielsen, \"Tree boosting with xgboost-why does xgboost win \"every\" machine learning competition?\" Master's thesis, NTNU, 2016. 4" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 310, + 689, + 565, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 689, + 565, + 718 + ], + "spans": [ + { + "bbox": [ + 310, + 689, + 565, + 718 + ], + "type": "text", + "content": "[134] S. Makridakis, E. Spiliotis, and V. Assimakopoulos, \"M5 accuracy competition: Results, findings, and conclusions,\" International Journal of Forecasting, vol. 38, no. 4, pp. 1346-1364, 2022. 4" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 310, + 718, + 565, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 718, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 718, + 565, + 746 + ], + "type": "text", + "content": "[135] H. Larochelle, D. Erhan, A. Courville, J. Bergstra, and Y. Bengio, \"An empirical evaluation of deep architectures on problems with many factors of variation,\" in ICML, 2007, pp. 473-480. 4" + } + ] + } + ], + "index": 55 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 44, + 302, + 746 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 47, + 44, + 302, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 44, + 302, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 44, + 302, + 71 + ], + "type": "text", + "content": "[136] R. Salakhutdinov and G. Hinton, \"Learning a nonlinear embedding by preserving class neighbourhood structure,\" in AISTATS, 2007, pp. 412-419. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "text", + "content": "[137] R. Min, D. A. Stanley, Z. Yuan, A. Bonner, and Z. Zhang, “A deep non-linear feature mapping for large-margin knn classification,” in ICDM, 2009, pp. 357-366. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "type": "text", + "content": "[138] M. Ahmed, A. N. Mahmood, and J. Hu, \"A survey of network anomaly detection techniques,\" Journal of Network and Computer Applications, vol. 60, pp. 19-31, 2016. 
4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "spans": [ + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "type": "text", + "content": "[139] L. Lu, M. Medo, C. H. Yeung, Y.-C. Zhang, Z.-K. Zhang, and T. Zhou, \"Recommender systems,\" Physics reports, vol. 519, no. 1, pp. 1-49, 2012. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 154, + 301, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 154, + 301, + 189 + ], + "spans": [ + { + "bbox": [ + 47, + 154, + 301, + 189 + ], + "type": "text", + "content": "[140] D. Salinas, V. Flunkert, J. Gasthaus, and T. Januschowski, \"Deepar: Probabilistic forecasting with autoregressive recurrent networks,\" International journal of forecasting, vol. 36, no. 3, pp. 1181-1191, 2020. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 190, + 301, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 190, + 301, + 217 + ], + "spans": [ + { + "bbox": [ + 47, + 190, + 301, + 217 + ], + "type": "text", + "content": "[141] T.-J. Huang, X.-Y. Chen, and H.-J. Ye, \"Seqfusion: Sequential fusion of pre-trained models for zero-shot time-series forecasting,\" CoRR, vol. abs/2503.02836, 2025. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 217, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 217, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 217, + 301, + 236 + ], + "type": "text", + "content": "[142] Q. Liu, F. Yu, S. Wu, and L. Wang, \"A convolutional click prediction model,\" in CIKM, 2015, pp. 1743-1746. 
4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 236, + 301, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 262 + ], + "type": "text", + "content": "[143] H. Guo, R. Tang, Y. Ye, Z. Li, and X. He, \"Deepfm: A factorization-machine based neural network for CTR prediction,\" in IJCAI, 2017, pp. 1725-1731. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 263, + 301, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 301, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 301, + 289 + ], + "type": "text", + "content": "[144] S. Somvanshi, S. Das, S. A. Javed, G. Antariksa, and A. Hossain, \"A survey on deep tabular learning,\" CoRR, vol. abs/2410.12034, 2024. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "type": "text", + "content": "[145] D. Lane, D. Scott, M. Hebl, R. Guerra, D. Osherson, and H. Zimmer, Introduction to statistics. CiteSeer, 2003. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 309, + 301, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 309, + 301, + 335 + ], + "spans": [ + { + "bbox": [ + 47, + 309, + 301, + 335 + ], + "type": "text", + "content": "[146] A. F. Karr, A. P. Sanil, and D. L. Banks, \"Data quality: A statistical perspective,\" Statistical Methodology, vol. 3, no. 2, pp. 137-173, 2006. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 336, + 301, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 336, + 301, + 372 + ], + "spans": [ + { + "bbox": [ + 47, + 336, + 301, + 372 + ], + "type": "text", + "content": "[147] A. Sánchez-Morales, J.-L. Sancho-Gómez, J.-A. 
Martínez-García, and A. R. Figueiras-Vidal, \"Improving deep learning performance with missing values via deletion and compensation,\" Neural Computing and Applications, vol. 32, pp. 13233-13244, 2020. 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 373, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 373, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 373, + 301, + 399 + ], + "type": "text", + "content": "[148] D. Chicco, L. Oneto, and E. Tavazzi, \"Eleven quick tips for data cleaning and feature engineering,\" PLOS Computational Biology, vol. 18, no. 12, p. e1010718, 2022. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 399, + 301, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 301, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 301, + 426 + ], + "type": "text", + "content": "[149] Y. Luo, M. Wang, H. Zhou, Q. Yao, W.-W. Tu, Y. Chen, W. Dai, and Q. Yang, \"Autocross: Automatic feature crossing for tabular data in real-world applications,\" in KDD, 2019, pp. 1936-1945. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 427, + 301, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 427, + 301, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 427, + 301, + 453 + ], + "type": "text", + "content": "[150] H. He and E. A. Garcia, \"Learning from imbalanced data,\" IEEE Transactions on knowledge and data engineering, vol. 21, no. 9, pp. 1263-1284, 2009. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 454, + 301, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 454, + 301, + 472 + ], + "spans": [ + { + "bbox": [ + 47, + 454, + 301, + 472 + ], + "type": "text", + "content": "[151] H. He and Y. Ma, Imbalanced learning: foundations, algorithms, and applications. John Wiley & Sons, 2013. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 473, + 301, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 473, + 301, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 473, + 301, + 491 + ], + "type": "text", + "content": "[152] T. Lin, P. Goyal, R. B. Girshick, K. He, and P. Dollar, \"Focal loss for dense object detection,\" in ICCV, 2017, pp. 2999-3007. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 491, + 301, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 301, + 517 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 301, + 517 + ], + "type": "text", + "content": "[153] J. M. Johnson and T. M. Khoshgoftaar, \"Survey on deep learning with class imbalance,\" Journal of big data, vol. 6, no. 1, pp. 1-54, 2019. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 518, + 301, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 518, + 301, + 545 + ], + "spans": [ + { + "bbox": [ + 47, + 518, + 301, + 545 + ], + "type": "text", + "content": "[154] J. Engelmann and S. Lessmann, \"Conditional Wasserstein gan-based oversampling of tabular data for imbalanced learning,\" Expert Systems with Applications, vol. 174, p. 114582, 2021. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 546, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 546, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 546, + 301, + 573 + ], + "type": "text", + "content": "[155] R. Sauber-Cole and T. M. Khoshgoftaar, \"The use of generative adversarial networks to alleviate class imbalance in tabular data: a survey,\" Journal of Big Data, vol. 9, no. 1, p. 98, 2022. 
5, 21" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 574, + 301, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 301, + 600 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 301, + 600 + ], + "type": "text", + "content": "[156] X.-Y. Liu, J. Wu, and Z.-H. Zhou, \"Exploratory undersampling for class-imbalance learning,\" IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550, 2008. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 601, + 301, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 601, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 601, + 301, + 628 + ], + "type": "text", + "content": "[157] N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer, \"SMOTE: synthetic minority over-sampling technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 628, + 301, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 628, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 47, + 628, + 301, + 664 + ], + "type": "text", + "content": "[158] A. Fernández, S. García, F. Herrera, and N. V. Chawla, \"SMOTE for learning from imbalanced data: Progress and challenges, marking the 15-year anniversary,\" Journal of Artificial Intelligence Research, vol. 61, pp. 863-905, 2018. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "spans": [ + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "type": "text", + "content": "[159] K. Cao, C. Wei, A. Gaidon, N. Arechiga, and T. Ma, \"Learning imbalanced datasets with label-distribution-aware margin loss,\" in NeurIPS, 2019, pp. 1567-1578. 
5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "type": "text", + "content": "[160] Y. Cui, M. Jia, T.-Y. Lin, Y. Song, and S. Belongie, \"Class-balanced loss based on effective number of samples,\" in CVPR, 2019, pp. 9268-9277. 5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "text", + "content": "[161] Y. Xie, Z. Wang, Y. Li, B. Ding, N. M. Gurel, C. Zhang, M. Huang, W. Lin, and J. Zhou, \"Fives: Feature interaction via edge search for large-scale tabular data,\" in SIGKDD, 2021, pp. 3795-3805. 5" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 565, + 746 + ], + "type": "list", + "angle": 0, + "index": 55, + "blocks": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "spans": [ + { + "bbox": [ + 310, + 44, + 565, + 71 + ], + "type": "text", + "content": "[162] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 5" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 565, + 99 + ], + "type": "text", + "content": "[163] A. Klein and F. Hutter, \"Tabular benchmarks for joint architecture and hyperparameter optimization,\" CoRR, vol. abs/1905.04970, 2019. 
5" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 100, + 565, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 565, + 128 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 565, + 128 + ], + "type": "text", + "content": "[164] P. Pokhrel, \"A comparison of automl hyperparameter optimization tools for tabular data,\" Ph.D. dissertation, Youngstown State University, 2023. 5" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 129, + 565, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 129, + 565, + 147 + ], + "spans": [ + { + "bbox": [ + 310, + 129, + 565, + 147 + ], + "type": "text", + "content": "[165] F. Hutter, L. Kotthoff, and J. Vanschoren, Automated machine learning: methods, systems, challenges. Springer Nature, 2019. 5" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 148, + 565, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 148, + 565, + 166 + ], + "spans": [ + { + "bbox": [ + 310, + 148, + 565, + 166 + ], + "type": "text", + "content": "[166] X. He, K. Zhao, and X. Chu, \"Automl: A survey of the state-of-the-art,\" Knowledge-based systems, vol. 212, p. 106622, 2021. 5" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 167, + 565, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 167, + 565, + 194 + ], + "spans": [ + { + "bbox": [ + 310, + 167, + 565, + 194 + ], + "type": "text", + "content": "[167] M. Feurer, K. Eggensperger, S. Falkner, M. Lindauer, and F. Hutter, \"Auto-sklearn 2.0: Hands-free automl via meta-learning,\" Journal of Machine Learning Research, vol. 23, no. 261, pp. 1-61, 2022. 
5" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 194, + 565, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 194, + 565, + 221 + ], + "spans": [ + { + "bbox": [ + 310, + 194, + 565, + 221 + ], + "type": "text", + "content": "[168] C. Mennella, U. Maniscalco, G. De Pietro, and M. Esposito, \"Ethical and regulatory challenges of ai technologies in healthcare: A narrative review,\" Heliyon, vol. 10, no. 4, 2024. 5" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 222, + 565, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 222, + 565, + 250 + ], + "spans": [ + { + "bbox": [ + 310, + 222, + 565, + 250 + ], + "type": "text", + "content": "[169] W. Moore and S. Frye, \"Review of hipaa, part 1: history, protected health information, and privacy and security rules,\" Journal of nuclear medicine technology, vol. 47, no. 4, pp. 269-272, 2019. 5" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 251, + 565, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 565, + 277 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 565, + 277 + ], + "type": "text", + "content": "[170] D. F. Sittig and H. Singh, \"Legal, ethical, and financial dilemmas in electronic health record adoption and use,\" Pediatrics, vol. 127, no. 4, pp. e1042-e1047, 2011. 5" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 278, + 565, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 278, + 565, + 315 + ], + "spans": [ + { + "bbox": [ + 310, + 278, + 565, + 315 + ], + "type": "text", + "content": "[171] J. Amann, A. Blasimme, E. Vayena, D. Frey, V. I. Madai, and P. Consortium, \"Explainability for artificial intelligence in healthcare: a multidisciplinary perspective,\" BMC medical informatics and decision making, vol. 20, pp. 1-9, 2020. 
5" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 316, + 565, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 316, + 565, + 343 + ], + "spans": [ + { + "bbox": [ + 310, + 316, + 565, + 343 + ], + "type": "text", + "content": "[172] B. S. Caffo, F. A. D'Asaro, A. Garcez, and E. Raffinetti, \"Explainable artificial intelligence models and methods in finance and healthcare,\" p. 970246, 2022. 5" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 344, + 565, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 344, + 565, + 362 + ], + "spans": [ + { + "bbox": [ + 310, + 344, + 565, + 362 + ], + "type": "text", + "content": "[173] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger, \"On calibration of modern neural networks,\" in ICML, 2017, pp. 1321-1330. 5" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 363, + 565, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 363, + 565, + 390 + ], + "spans": [ + { + "bbox": [ + 310, + 363, + 565, + 390 + ], + "type": "text", + "content": "[174] K. Helli, D. Schnurr, N. Hollmann, S. Müller, and F. Hutter, \"Drift-resilient tabpfn: In-context learning temporal distribution shifts on tabular data,\" in NeurIPS, 2024, pp. 98742-98781. 5, 21" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 391, + 565, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 391, + 565, + 418 + ], + "spans": [ + { + "bbox": [ + 310, + 391, + 565, + 418 + ], + "type": "text", + "content": "[175] J. Demsr, \"Statistical comparisons of classifiers over multiple data sets,\" Journal of Machine Learning Research, vol. 7, pp. 1-30, 2006. 
5" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 418, + 565, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 418, + 565, + 445 + ], + "spans": [ + { + "bbox": [ + 310, + 418, + 565, + 445 + ], + "type": "text", + "content": "[176] Y. Gorishniy, A. Kotelnikov, and A. Babenko, \"Tabm: Advancing tabular deep learning with parameter-efficient ensembling,\" in ICLR, 2025. 5, 19" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 447, + 565, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 565, + 465 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 565, + 465 + ], + "type": "text", + "content": "[177] M. E. Glickman and A. C. Jones, \"Rating the chess rating system,\" CHANCE-BERLIN THEN NEW YORK-, vol. 12, pp. 21-28, 1999. 5" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "spans": [ + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "type": "text", + "content": "[178] L. M. Hvattum and H. Arntzen, \"Using elo ratings for match result prediction in association football,\" International Journal of forecasting, vol. 26, no. 3, pp. 460-470, 2010. 5" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 495, + 565, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 495, + 565, + 530 + ], + "spans": [ + { + "bbox": [ + 310, + 495, + 565, + 530 + ], + "type": "text", + "content": "[179] J. Ma, V. Thomas, R. Hosseinzadeh, H. Kamkari, A. Labach, J. C. Cresswell, K. Golestan, G. Yu, M. Volkovs, and A. L. Caterini, \"Tabdpt: Scaling tabular foundation models,\" CoRR, vol. abs/2410.18164, 2024. 
6, 18, 19" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 531, + 565, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 531, + 565, + 559 + ], + "spans": [ + { + "bbox": [ + 310, + 531, + 565, + 559 + ], + "type": "text", + "content": "[180] A. Tschalzev, L. Purucker, S. Lüdtke, F. Hutter, C. Bartelt, and H. Stuckenschmidt, \"Unreflected use of tabular data repositories can undermine research quality,\" in ICLR Workshop, 2025. 6, 7" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 565, + 586 + ], + "type": "text", + "content": "[181] S. B. Rabbani, I. V. Medri, and M. D. Samad, \"Attention versus contrastive learning of tabular data - A data-centric benchmarking,\" CoRR, vol. abs/2401.04266, 2024. 6" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 587, + 565, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 587, + 565, + 614 + ], + "spans": [ + { + "bbox": [ + 310, + 587, + 565, + 614 + ], + "type": "text", + "content": "[182] Y. Yang, Y. Wang, G. Liu, L. Wu, and Q. Liu, \"Unitabe: A universal pretraining protocol for tabular foundation model in data science,\" in ICLR, 2024. 6, 9, 16" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 615, + 565, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 615, + 565, + 633 + ], + "spans": [ + { + "bbox": [ + 310, + 615, + 565, + 633 + ], + "type": "text", + "content": "[183] G. Eggert, K. Huo, M. Biven, and J. Waugh, \"Tablib: A dataset of 627m tables with context,\" CoRR, vol. abs/2310.07875, 2023. 
6" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 634, + 565, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 634, + 565, + 670 + ], + "spans": [ + { + "bbox": [ + 310, + 634, + 565, + 670 + ], + "type": "text", + "content": "[184] H. W. Jian Yang, Xuefeng Li, \"DeepTables: A Deep Learning Python Package for Tabular Data,\" https://github.com/DataCanvasIO/DeepTables, 2022, version 0.2.x.6" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 671, + 565, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 671, + 565, + 698 + ], + "spans": [ + { + "bbox": [ + 310, + 671, + 565, + 698 + ], + "type": "text", + "content": "[185] N. Erickson, J. Mueller, A. Shirkov, H. Zhang, P. Larroy, M. Li, and A. Smola, \"Autogluon-tabular: Robust and accurate automl for structured data,\" CoRR, vol. abs/2003.06505, 2020. 6" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 700, + 565, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 565, + 718 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 565, + 718 + ], + "type": "text", + "content": "[186] M. Joseph, \"Pytorch tabular: A framework for deep learning with tabular data,\" CoRR, vol. abs/2104.13638, 2021. 6" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 310, + 719, + 565, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 719, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 719, + 565, + 746 + ], + "type": "text", + "content": "[187] J. R. Zaurin and P. Mulinka, \"pytorch-widedeep: A flexible package for multimodal deep learning,\" Journal of Open Source Software, vol. 8, no. 86, p. 5027, Jun. 2023. 
6" + } + ] + } + ], + "index": 54 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 70 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 43, + 301, + 70 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 301, + 70 + ], + "type": "text", + "content": "[188] S.-Y. Liu, H.-R. Cai, Q.-L. Zhou, and H.-J. Ye, \"TALENT: A tabular analytics and learning toolbox,\" CoRR, vol. abs/2407.04057, 2024. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "text", + "content": "[189] T. Akiba, S. Sano, T. Yanase, T. Ohta, and M. Koyama, \"Optuna: A next-generation hyperparameter optimization framework,\" in KDD, 2019, pp. 2623-2631. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 301, + 125 + ], + "type": "text", + "content": "[190] N. Morgan and H. 
Bourlard, \"Generalization and parameter estimation in feedforward nets: Some experiments,\" in NeuIPS, 1989, pp. 630-637. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 126, + 301, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 126, + 301, + 144 + ], + "spans": [ + { + "bbox": [ + 47, + 126, + 301, + 144 + ], + "type": "text", + "content": "[191] S. Arlot and A. Celisse, \"A survey of cross-validation procedures for model selection,\" CoRR, vol. abs/0907.4728, 2009. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 144, + 301, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 144, + 301, + 172 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 301, + 172 + ], + "type": "text", + "content": "[192] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, T.-W. Chen, and T.-H. Chang, \"Prompt: Towards a better deep neural network for tabular data,\" in ICML, 2023, pp. 4392-4434. 7, 9, 10" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 172, + 301, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 172, + 301, + 198 + ], + "spans": [ + { + "bbox": [ + 47, + 172, + 301, + 198 + ], + "type": "text", + "content": "[193] S. Marton, S. Lüdtke, C. Bartelt, and H. Stuckenschmidt, \"GRANDE: gradient-based decision tree ensembles for tabular data,\" in ICLR, 2024. 7, 8, 9, 12, 19" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 199, + 301, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 199, + 301, + 234 + ], + "spans": [ + { + "bbox": [ + 47, + 199, + 301, + 234 + ], + "type": "text", + "content": "[194] X. Jiang, A. Margeloiu, N. Simidjievski, and M. Jamnik, \"Protogate: Prototype-based neural networks with global-to-local feature selection for tabular biomedical data,\" in ICML, 2024, pp. 21844-21878. 
7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 235, + 301, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 235, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 235, + 301, + 262 + ], + "type": "text", + "content": "[195] G. C. Cawley and N. L. C. Talbot, \"On over-fitting in model selection and subsequent selection bias in performance evaluation,\" Journal of Machine Learning Research, vol. 11, pp. 2079-2107, 2010. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "type": "text", + "content": "[196] T. G. Dietterich, \"Approximate statistical tests for comparing supervised classification learning algorithms,\" Neural Computation, vol. 10, no. 7, pp. 1895-1923, 1998. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 301, + 308 + ], + "type": "text", + "content": "[197] S. Raschka, \"Model evaluation, model selection, and algorithm selection in machine learning,\" CoRR, vol. abs/1811.12808, 2018. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 308, + 301, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 344 + ], + "type": "text", + "content": "[198] H. Schulz-Kumpel, S. Fischer, T. Nagler, A. Boulesteix, B. Bischl, and R. Hornung, \"Constructing confidence intervals for 'the' generalization error - a comprehensive benchmark study,\" CoRR, vol. abs/2409.18836, 2024. 
7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 345, + 301, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 345, + 301, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 345, + 301, + 371 + ], + "type": "text", + "content": "[199] T. Nagler, L. Schneider, B. Bischl, and M. Feurer, \"Reshuffling resampling splits can improve generalization of hyperparameter optimization,\" in NeurIPS, 2024. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 372, + 301, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 372, + 301, + 390 + ], + "spans": [ + { + "bbox": [ + 47, + 372, + 301, + 390 + ], + "type": "text", + "content": "[200] J. Feng, Y. Yu, and Z. Zhou, \"Multi-layered gradient boosting decision trees,\" in NeurIPS, 2018, pp. 3555-3565. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 391, + 301, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 391, + 301, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 391, + 301, + 426 + ], + "type": "text", + "content": "[201] I. Padhi, Y. Schiff, I. Melnyk, M. Rigotti, Y. Mroueh, P. Dognin, J. Ross, R. Nair, and E. Altman, \"Tabular transformers for modeling multivariate time series,\" in ICASSP, 2021, pp. 3565-3569. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 426, + 301, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 301, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 301, + 462 + ], + "type": "text", + "content": "[202] F. Di Martino and F. Delmastro, \"Explainable ai for clinical and remote health applications: a survey on tabular and time series data,\" Artificial Intelligence Review, vol. 56, no. 6, pp. 5261-5315, 2023. 
7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 463, + 301, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 463, + 301, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 463, + 301, + 491 + ], + "type": "text", + "content": "[203] G. M. Van de Ven, T. Tuytelaars, and A. S. Tolias, \"Three types of incremental learning,\" Nature Machine Intelligence, vol. 4, no. 12, pp. 1185-1197, 2022. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 491, + 301, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 301, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 301, + 525 + ], + "type": "text", + "content": "[204] D.-W. Zhou, Q.-W. Wang, Z.-H. Qi, H.-J. Ye, D.-C. Zhan, and Z. Liu, \"Class-incremental learning: A survey,\" IEEE transactions on pattern analysis and machine intelligence, vol. 46, no. 12, pp. 9851-9873, 2024. 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "type": "text", + "content": "[205] J. Yosinski, J. Clune, Y. Bengio, and H. Lipson, \"How transferable are features in deep neural networks?\" in NIPS, vol. 27, 2014. 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "type": "text", + "content": "[206] S. U. H. Dar, M. Özbey, A. B. Çatlı, and T. Çukur, \"A transfer-learning approach for accelerated mri using deep neural networks,\" Magnetic resonance in medicine, vol. 84, no. 2, pp. 663-685, 2020. 
7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 582, + 301, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 301, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 301, + 608 + ], + "type": "text", + "content": "[207] Y. Cao, Z. Fang, Y. Wu, D.-X. Zhou, and Q. Gu, \"Towards understanding the spectral bias of deep learning,\" CoRR, vol. abs/1912.01198, 2019. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 609, + 301, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 301, + 636 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 301, + 636 + ], + "type": "text", + "content": "[208] R. Basri, M. Galun, A. Geifman, D. Jacobs, Y. Kasten, and S. Kritchman, \"Frequency bias in neural networks for input of non-uniform density,\" in ICML, 2020, pp. 685-694. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 637, + 301, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 637, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 47, + 637, + 301, + 664 + ], + "type": "text", + "content": "[209] F. Matteucci, V. Arzamasov, and K. Böhm, \"A benchmark of categorical encoders for binary classification,\" in NeurIPS, 2023, pp. 54855-54875. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "spans": [ + { + "bbox": [ + 47, + 664, + 301, + 691 + ], + "type": "text", + "content": "[210] J. Yan, J. Chen, Q. Wang, D. Z. Chen, and J. Wu, \"Team up gbdts and dnns: Advancing efficient and effective tabular prediction with tree-hybrid mlp's,\" in SIGKDD, 2024, pp. 3679-3689. 
8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 691, + 301, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 691, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 691, + 301, + 718 + ], + "type": "text", + "content": "[211] M. Pang, K. M. Ting, P. Zhao, and Z. Zhou, \"Improving deep forest by screening,\" IEEE Transactions on Knowledge and Data Engineering., vol. 34, no. 9, pp. 4298-4312, 2022. 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 719, + 301, + 746 + ], + "type": "text", + "content": "[212] M. T. Ribeiro, S. Singh, and C. Guestrin, \"why should I trust you?: Explaining the predictions of any classifier,\" in KDD, 2016, pp. 1135-1144. 8" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 565, + 746 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 310, + 44, + 564, + 62 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 44, + 564, + 62 + ], + "spans": [ + { + "bbox": [ + 310, + 44, + 564, + 62 + ], + "type": "text", + "content": "[213] S. M. Lundberg and S. Lee, “A unified approach to interpreting model predictions,” in NIPS, 2017, pp. 4765-4774. 8" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 63, + 565, + 80 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 63, + 565, + 80 + ], + "spans": [ + { + "bbox": [ + 310, + 63, + 565, + 80 + ], + "type": "text", + "content": "[214] Z.-H. Zhou and J. Feng, \"Deep forest,\" National science review, vol. 6, no. 1, pp. 74-86, 2019. 
8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 81, + 565, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 81, + 565, + 108 + ], + "spans": [ + { + "bbox": [ + 310, + 81, + 565, + 108 + ], + "type": "text", + "content": "[215] Y. Cheng, R. Hu, H. Ying, X. Shi, J. Wu, and W. Lin, \"Arithmetic feature interaction is necessary for deep tabular learning,\" in AAAI, 2024, pp. 11516-11524. 9, 12, 13" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 109, + 565, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 109, + 565, + 144 + ], + "spans": [ + { + "bbox": [ + 310, + 109, + 565, + 144 + ], + "type": "text", + "content": "[216] J. Kossen, N. Band, C. Lyle, A. N. Gomez, T. Rainforth, and Y. Gal, \"Self-attention between datapoints: Going beyond individual input-output pairs in deep learning,\" in NeurIPS, 2021, pp. 28742-28756. 9" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 144, + 565, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 144, + 565, + 171 + ], + "spans": [ + { + "bbox": [ + 310, + 144, + 565, + 171 + ], + "type": "text", + "content": "[217] B. Schäfl, L. Gruber, A. Bitto-Nemling, and S. Hochreiter, \"Hop- ular: Modern hopfield networks for tabular data,\" CoRR, vol. abs/2206.00664, 2022. 9, 10" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 172, + 565, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 172, + 565, + 198 + ], + "spans": [ + { + "bbox": [ + 310, + 172, + 565, + 198 + ], + "type": "text", + "content": "[218] H. Kim, A. Mnih, J. Schwarz, M. Garnelo, S. M. A. Eslami, D. Rosenbaum, O. Vinyals, and Y. W. Teh, \"Attentive neural processes,\" in ICLR, 2019. 
9, 10" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 199, + 565, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 199, + 565, + 225 + ], + "spans": [ + { + "bbox": [ + 310, + 199, + 565, + 225 + ], + "type": "text", + "content": "[219] I. Shavitt and E. Segal, \"Regularization learning networks: deep learning for tabular datasets,\" in NeurIPS, 2018, pp. 1386-1396. 9, 10" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 226, + 565, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 226, + 565, + 254 + ], + "spans": [ + { + "bbox": [ + 310, + 226, + 565, + 254 + ], + "type": "text", + "content": "[220] V. Verma, T. Luong, K. Kawaguchi, H. Pham, and Q. V. Le, \"Towards domain-agnostic contrastive learning,\" in ICML, 2021, pp. 10530-10541. 9, 14" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 254, + 565, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 254, + 565, + 280 + ], + "spans": [ + { + "bbox": [ + 310, + 254, + 565, + 280 + ], + "type": "text", + "content": "[221] C. Lee, F. Imrie, and M. van der Schaar, \"Self-supervision enhanced feature selection with correlated gates,\" in ICLR, 2022. 9, 14" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 281, + 565, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 281, + 565, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 281, + 565, + 308 + ], + "type": "text", + "content": "[222] R. Levin, V. Cherepanova, A. Schwarzschild, A. Bansal, C. B. Bruss, T. Goldstein, A. G. Wilson, and M. Goldblum, \"Transfer learning with deep tabular models,\" in ICLR, 2023. 
9, 13, 14, 15" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 308, + 565, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 565, + 327 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 565, + 327 + ], + "type": "text", + "content": "[223] K. Majmundar, S. Goyal, P. Netrapalli, and P. Jain, \"MET: masked encoding for tabular data,\" CoRR, vol. abs/2206.08564, 2022. 9, 14" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 327, + 565, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 327, + 565, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 327, + 565, + 354 + ], + "type": "text", + "content": "[224] E. Hajiramezanali, N. L. Diamant, G. Scalia, and M. W. Shen, \"Stab: Self-supervised learning for tabular data,\" in NeurIPS Workshop, 2022. 9, 14" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 354, + 565, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 565, + 380 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 565, + 380 + ], + "type": "text", + "content": "[225] S. Chen, J. Wu, N. Hovakimyan, and H. Yao, \"Recontab: Regularized contrastive representation learning for tabular data,\" CoRR, vol. abs/2310.18541, 2023. 9, 14" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 381, + 565, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 381, + 565, + 409 + ], + "spans": [ + { + "bbox": [ + 310, + 381, + 565, + 409 + ], + "type": "text", + "content": "[226] W.-W. Du, W.-Y. Wang, and W.-C. Peng, \"Dora: Domain-based self-supervised learning framework for low-resource real estate appraisal,\" in CIKM, 2023, pp. 4552-4558. 
9, 14" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 410, + 565, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 410, + 565, + 436 + ], + "spans": [ + { + "bbox": [ + 310, + 410, + 565, + 436 + ], + "type": "text", + "content": "[227] Y. Sui, T. Wu, J. C. Cresswell, G. Wu, G. Stein, X. S. Huang, X. Zhang, and M. Volkovs, \"Self-supervised representation learning from random data projectors,\" in ICLR, 2024. 9, 14" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 437, + 565, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 437, + 565, + 462 + ], + "spans": [ + { + "bbox": [ + 310, + 437, + 565, + 462 + ], + "type": "text", + "content": "[228] T. Iwata and A. Kumagai, \"Meta-learning from tasks with heterogeneous attribute spaces,\" in NeurIPS, 2020, pp. 6053-6063. 9, 13, 15" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 463, + 565, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 463, + 565, + 491 + ], + "spans": [ + { + "bbox": [ + 310, + 463, + 565, + 491 + ], + "type": "text", + "content": "[229] L. Liu, M. M. Fard, and S. Zhao, \"Distribution embedding networks for generalization from a diverse set of classification tasks,\" Transactions on Machine Learning Research, 2022. 9, 15" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 491, + 565, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 491, + 565, + 518 + ], + "spans": [ + { + "bbox": [ + 310, + 491, + 565, + 518 + ], + "type": "text", + "content": "[230] B. Zhu, X. Shi, N. Erickson, M. Li, G. Karypis, and M. Shoaran, \"Xtab: Cross-table pretraining for tabular transformers,\" in ICML, 2023, pp. 43181-43204. 
9, 12, 13, 15" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 519, + 565, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 519, + 565, + 545 + ], + "spans": [ + { + "bbox": [ + 310, + 519, + 565, + 545 + ], + "type": "text", + "content": "[231] Y. Zhang, K. Gong, K. Zhang, H. Li, Y. Qiao, W. Ouyang, and X. Yue, \"Meta-transformer: A unified framework for multimodal learning,\" CoRR, vol. abs/2307.10802, 2023. 9, 15" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 545, + 565, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 545, + 565, + 572 + ], + "spans": [ + { + "bbox": [ + 310, + 545, + 565, + 572 + ], + "type": "text", + "content": "[232] G. Liu, J. Yang, and L. Wu, \"Ptab: Using the pre-trained language model for modeling tabular data,\" CoRR, vol. abs/2209.08060, 2022. 9, 16" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 573, + 565, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 573, + 565, + 599 + ], + "spans": [ + { + "bbox": [ + 310, + 573, + 565, + 599 + ], + "type": "text", + "content": "[233] M. J. Kim, L. Grinsztajn, and G. Varoquaux, \"CARTE: pretraining and transfer for tabular learning,\" in ICML, 2024, pp. 23843-23866. 9, 16, 17" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 600, + 565, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 600, + 565, + 635 + ], + "spans": [ + { + "bbox": [ + 310, + 600, + 565, + 635 + ], + "type": "text", + "content": "[234] Z. Cheng, T. Xie, P. Shi, C. Li, R. Nadkarni, Y. Hu, C. Xiong, D. Radev, M. Ostendorf, L. Zettlemoyer, N. A. Smith, and T. Yu, \"Binding language models in symbolic languages,\" in ICLR, 2023. 
9, 16" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 636, + 565, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 636, + 565, + 663 + ], + "spans": [ + { + "bbox": [ + 310, + 636, + 565, + 663 + ], + "type": "text", + "content": "[235] T. Zhang, S. Wang, S. Yan, L. Jian, and Q. Liu, \"Generative table pre-training empowers models for tabular prediction,\" in EMNLP, 2023. 9, 16" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 664, + 565, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 664, + 565, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 664, + 565, + 700 + ], + "type": "text", + "content": "[236] T. Dinh, Y. Zeng, R. Zhang, Z. Lin, M. Gira, S. Rajput, J. yong Sohn, D. S. Papailiopoulos, and K. Lee, \"LIFT: language-interfaced fine-tuning for non-language machine learning tasks,\" in NeurIPS, 2022, pp. 11763-11784. 9, 16" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 700, + 565, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 565, + 727 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 565, + 727 + ], + "type": "text", + "content": "[237] R. Wang, Z. Wang, and J. Sun, \"Unipredict: Large language models are universal tabular predictors,\" CoRR, vol. abs/2310.03266, 2023. 9, 16" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 727, + 565, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 727, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 727, + 565, + 746 + ], + "type": "text", + "content": "[238] A. Sharma, E. Vans, D. Shigemizu, K. A. Boroevich, and T. 
Tsunoda, \"Deepinsight: A methodology to transform a non-image" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 68, + 43, + 301, + 62 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 43, + 301, + 62 + ], + "spans": [ + { + "bbox": [ + 68, + 43, + 301, + 62 + ], + "type": "text", + "content": "data to an image for convolution neural network architecture,\" Scientific reports, vol. 9, no. 1, p. 11399, 2019. 9, 17" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 63, + 301, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 63, + 301, + 106 + ], + "spans": [ + { + "bbox": [ + 47, + 63, + 301, + 106 + ], + "type": "text", + "content": "[239] O. Bazgir, R. Zhang, S. R. Dhruba, R. Rahman, S. Ghosh, and R. Pal, \"Representation of features as images with neighborhood dependencies for compatibility with convolutional neural networks,\" Nature communications, vol. 11, no. 1, p. 4391, 2020. 
9, 17" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 107, + 301, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 107, + 301, + 135 + ], + "spans": [ + { + "bbox": [ + 47, + 107, + 301, + 135 + ], + "type": "text", + "content": "[240] L. Buturovic and D. Miljkovic, \"A novel method for classification of tabular data using convolutional neural networks,\" BioRxiv, pp. 2020-05, 2020. 9, 17" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "type": "text", + "content": "[241] V. Gómez-Martínez, F. J. Lara-Abelenda, P. Peiro-Corbacho, D. Chushig-Muzo, C. Granja, and C. Soguero-Ruiz, \"LM-IGTD: a 2d image generator for low-dimensional and mixed-type tabular data to leverage the potential of convolutional neural networks,\" CoRR, vol. abs/2406.14566, 2024. 9, 17" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 180, + 301, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 208 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 208 + ], + "type": "text", + "content": "[242] B. Sun, L. Yang, W. Zhang, M. Lin, P. Dong, C. Young, and J. Dong, \"Supertml: Two-dimensional word embedding for the precognition on structured tabular data,\" in CVPR Workshops, 2019. 9, 17" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 208, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 208, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 208, + 301, + 236 + ], + "type": "text", + "content": "[243] Z. Wang, C. Gao, C. Xiao, and J. Sun, \"Meditab: Scaling medical tabular data predictors via data consolidation, enrichment, and refinement,\" in *IJCAI*, 2024, pp. 6062-6070. 
9, 19" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 236, + 301, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 271 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 271 + ], + "type": "text", + "content": "[244] R. Bommasani, D. A. Hudson, E. Adeli, R. Altman, S. Arora, S. von Arx, M. S. Bernstein, J. Bohg, A. Bosselut, E. Brunskill et al., \"On the opportunities and risks of foundation models,\" CoRR, vol. abs/2108.07258, 2021. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 271, + 301, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 271, + 301, + 290 + ], + "spans": [ + { + "bbox": [ + 47, + 271, + 301, + 290 + ], + "type": "text", + "content": "[245] J. Goldberger, G. E. Hinton, S. Roweis, and R. R. Salakhutdinov, \"Neighbourhood components analysis,\" in NIPS, vol. 17, 2004. 10" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 290, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 290, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 290, + 301, + 354 + ], + "type": "text", + "content": "[246] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei, \"Language models are few-shot learners,\" in NeurIPS, 2020, pp. 1877-1901. 10, 21" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 354, + 301, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 380 + ], + "type": "text", + "content": "[247] R. 
Tibshirani, \"Regression shrinkage and selection via the lasso,\" Journal of the Royal Statistical Society Series B: Statistical Methodology, vol. 58, no. 1, pp. 267-288, 1996. 10" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 380, + 301, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 301, + 408 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 301, + 408 + ], + "type": "text", + "content": "[248] A. E. Hoerl and R. W. Kennard, \"Ridge regression: Biased estimation for nonorthogonal problems,\" Technometrics, vol. 12, no. 1, pp. 55-67, 1970. 10" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 408, + 301, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 408, + 301, + 436 + ], + "spans": [ + { + "bbox": [ + 47, + 408, + 301, + 436 + ], + "type": "text", + "content": "[249] H. Zou and T. Hastie, “Zou h, hastie t. regularization and variable selection via the elastic net.” Journal of the Royal Statistical Society: Series B (Statistical Methodology), vol. 67, pp. 301–320, 2005. 10" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 436, + 301, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 436, + 301, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 436, + 301, + 462 + ], + "type": "text", + "content": "[250] J. T. Hancock and T. M. Khoshgoftaar, \"Survey on categorical data for neural networks,\" Journal of big data, vol. 7, no. 1, p. 28, 2020. 11" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 462, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 462, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 462, + 301, + 480 + ], + "type": "text", + "content": "[251] J. R. Quinlan, C4.5: programs for machine learning. Elsevier, 2014. 
12" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 481, + 301, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 498 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 498 + ], + "type": "text", + "content": "[252] L. Breiman, \"Random forests,\" Machine learning, vol. 45, pp. 5-32, 2001. 12" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 498, + 301, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 301, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 301, + 526 + ], + "type": "text", + "content": "[253] Z.-H. Zhou and Y. Jiang, \"Nec4. 5: Neural ensemble based c4. 5,\" IEEE Transactions on knowledge and data engineering, vol. 16, no. 6, pp. 770-773, 2004. 12, 14, 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 301, + 544 + ], + "type": "text", + "content": "[254] T. Hastie and R. Tibshirani, \"Generalized additive models,\" Statistical science, vol. 1, no. 3, pp. 297-310, 1986. 12" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 544, + 301, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 301, + 581 + ], + "type": "text", + "content": "[255] R. Agarwal, L. Melnick, N. Frosst, X. Zhang, B. Lengerich, R. Caruana, and G. E. Hinton, \"Neural additive models: Interpretable machine learning with neural nets,\" in NeurIPS, 2021, pp. 4699-4711. 12, 20" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 581, + 301, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 581, + 301, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 581, + 301, + 609 + ], + "type": "text", + "content": "[256] W.-Y. Wang, W.-W. Du, D. Xu, W. 
Wang, and W.-C. Peng, \"A survey on self-supervised learning for non-sequential tabular data,\" Machine Learning, vol. 114, no. 1, p. 16, 2025. 13, 14" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 609, + 301, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 301, + 627 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 301, + 627 + ], + "type": "text", + "content": "[257] G. Hinton, O. Vinyals, and J. Dean, \"Distilling the knowledge in a neural network,\" CoRR, vol. abs/1503.02531, 2015. 14" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 627, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 627, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 627, + 301, + 654 + ], + "type": "text", + "content": "[258] S. Yun, D. Han, S. Chun, S. J. Oh, Y. Yoo, and J. Choe, \"Cutmix: Regularization strategy to train strong classifiers with localizable features,\" in ICCV, 2019, pp. 6023-6032. 14" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 654, + 301, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 673 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 673 + ], + "type": "text", + "content": "[259] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, \"mixup: Beyond empirical risk minimization,\" in ICLR, 2018. 14" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 673, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 673, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 673, + 301, + 700 + ], + "type": "text", + "content": "[260] C. Hou and Z.-H. Zhou, \"One-pass learning with incremental and decremental features,\" IEEE transactions on pattern analysis and machine intelligence, vol. 40, no. 11, pp. 2776-2792, 2017. 
15" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 700, + 301, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 727 + ], + "type": "text", + "content": "[261] H.-J. Ye, D.-C. Zhan, Y. Jiang, and Z.-H. Zhou, \"Rectify heterogeneous models with semantic mapping,\" in ICML, 2018, pp. 5630-5639. 15" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "text", + "content": "[262] H.-J. Ye, L. Han, and D.-C. Zhan, \"Revisiting unsupervised meta-learning via the characteristics of few-shot tasks,\" IEEE" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 43, + 565, + 746 + ], + "type": "list", + "angle": 0, + "index": 53, + "blocks": [ + { + "bbox": [ + 331, + 43, + 565, + 62 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 43, + 565, + 62 + ], + "spans": [ + { + "bbox": [ + 331, + 43, + 565, + 62 + ], + "type": "text", + "content": "Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 3, pp. 3721-3737, 2022. 15" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 63, + 565, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 63, + 565, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 63, + 565, + 99 + ], + "type": "text", + "content": "[263] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, and V. Stoyanov, \"Roberta: A robustly optimized bert pretraining approach,\" CoRR, vol. abs/1907.11692, 2019. 
16" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 100, + 565, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 565, + 119 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 565, + 119 + ], + "type": "text", + "content": "[264] F. Mahdisoltani, J. Biega, and F. M. Suchanek, \"YAGO3: A knowledge base from multilingual wikipediais,\" in CIDR, 2015. 16" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 119, + 565, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 119, + 565, + 155 + ], + "spans": [ + { + "bbox": [ + 310, + 119, + 565, + 155 + ], + "type": "text", + "content": "[265] N. Hollmann, S. Müller, and F. Hutter, \"Large language models for automated data science: Introducing caafe for context-aware automated feature engineering,\" in NeurIPS, 2023, pp. 44753-44775. 16" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "spans": [ + { + "bbox": [ + 310, + 156, + 565, + 184 + ], + "type": "text", + "content": "[266] S. Han, J. Yoon, S. O. Arik, and T. Pfister, \"Large language models can automatically engineer features for few-shot tabular learning,\" in ICML, 2024, pp. 17454-17479. 16" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 184, + 565, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 184, + 565, + 212 + ], + "spans": [ + { + "bbox": [ + 310, + 184, + 565, + 212 + ], + "type": "text", + "content": "[267] J. Herzig, P. K. Nowak, T. Müller, F. Piccinno, and J. M. Eisenschlos, \"Tapas: Weakly supervised table parsing via pre-training,\" in ACL, 2020, pp. 4320-4333. 
16" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 212, + 565, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 212, + 565, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 212, + 565, + 239 + ], + "type": "text", + "content": "[268] P. Yin, G. Neubig, W. tau Yih, and S. Riedel, \"Tabert: Pretraining for joint understanding of textual and tabular data,\" in ACL, 2020, pp. 8413-8426. 16" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 240, + 565, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 240, + 565, + 268 + ], + "spans": [ + { + "bbox": [ + 310, + 240, + 565, + 268 + ], + "type": "text", + "content": "[269] M. Chen, L. Shen, Z. Li, X. J. Wang, J. Sun, and C. Liu, \"Visions: Visual masked autoencoders are free-lunch zero-shot time series forecasters,\" CoRR, vol. abs/2408.17253, 2024. 16" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 268, + 565, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 268, + 565, + 295 + ], + "spans": [ + { + "bbox": [ + 310, + 268, + 565, + 295 + ], + "type": "text", + "content": "[270] Z. Li, S. Li, and X. Yan, \"Time series as images: Vision transformer for irregularly sampled time series,\" in NeurIPS, 2023, pp. 49 187-49 204. 16" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 295, + 565, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 295, + 565, + 324 + ], + "spans": [ + { + "bbox": [ + 310, + 295, + 565, + 324 + ], + "type": "text", + "content": "[271] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dólar, and R. B. Girshick, \"Segment anything,\" in ICCV, 2023, pp. 3992-4003. 
17" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 324, + 565, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 324, + 565, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 324, + 565, + 342 + ], + "type": "text", + "content": "[272] D. Ha, A. M. Dai, and Q. V. Le, \"Hypernetworks,\" in ICLR, 2017. 18" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 343, + 565, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 343, + 565, + 371 + ], + "spans": [ + { + "bbox": [ + 310, + 343, + 565, + 371 + ], + "type": "text", + "content": "[273] W.-L. Chao, H.-J. Ye, D.-C. Zhan, M. E. Campbell, and K. Q. Weinberger, “Revisiting meta-learning as supervised learning,” CoRR, vol. abs/2002.00573, 2020. 18" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 371, + 565, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 371, + 565, + 391 + ], + "spans": [ + { + "bbox": [ + 310, + 371, + 565, + 391 + ], + "type": "text", + "content": "[274] J. Peters, D. Janzing, and B. Scholkopf, Elements of causal inference: foundations and learning algorithms. The MIT Press, 2017. 18" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 391, + 565, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 391, + 565, + 409 + ], + "spans": [ + { + "bbox": [ + 310, + 391, + 565, + 409 + ], + "type": "text", + "content": "[275] R. Neal, Bayesian Learning for Neural Networks, ser. Incs. springer, 1996. 18" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "spans": [ + { + "bbox": [ + 310, + 410, + 565, + 437 + ], + "type": "text", + "content": "[276] S. Müller, N. Hollmann, S. Pineda-Arango, J. Grabocka, and F. Hutter, \"Transformers can do bayesian inference,\" in ICLR, 2022. 
18" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "spans": [ + { + "bbox": [ + 310, + 437, + 565, + 465 + ], + "type": "text", + "content": "[277] H.-J. Ye, S.-Y. Liu, and W.-L. Chao, \"A closer look at tabpfn v2: Strength, limitation, and extension,\" CoRR, vol. abs/2502.17361, 2025. 18" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "spans": [ + { + "bbox": [ + 310, + 466, + 565, + 494 + ], + "type": "text", + "content": "[278] T. Iwata and A. Kumagai, \"Meta-learning of semi-supervised learning from tasks with heterogeneous attribute spaces,\" CoRR, vol. abs/2311.05088, 2023. 18" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 495, + 565, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 495, + 565, + 522 + ], + "spans": [ + { + "bbox": [ + 310, + 495, + 565, + 522 + ], + "type": "text", + "content": "[279] T. Nagler, \"Statistical foundations of prior-data fitted networks,\" in ICML, A. Krause, E. Brunskill, K. Cho, B. Engelhardt, S. Sabato, and J. Scarlett, Eds., 2023, pp. 25660-25676. 18" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 522, + 565, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 522, + 565, + 549 + ], + "spans": [ + { + "bbox": [ + 310, + 522, + 565, + 549 + ], + "type": "text", + "content": "[280] J. Ma, A. Dankar, G. Stein, G. Yu, and A. L. Caterini, \"Tabpfgen - tabular data generation with tabpfn,\" CoRR, vol. abs/2406.05216, 2024. 
18" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 550, + 565, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 565, + 587 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 565, + 587 + ], + "type": "text", + "content": "[281] S. Ruiz-Villafranca, J. R. Gómez, J. M. C. Gómez, J. C. Mondéjar, and J. L. Martínez, \"A tabpfn-based intrusion detection system for the industrial internet of things,\" The Journal of Supercomputing, vol. 80, no. 14, pp. 20080-20117, 2024. 18" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 587, + 565, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 587, + 565, + 624 + ], + "spans": [ + { + "bbox": [ + 310, + 587, + 565, + 624 + ], + "type": "text", + "content": "[282] A. Margeloiu, A. Bazaga, N. Simidjievski, P. Lio, and M. Jamnik, \"Tabmda: Tabular manifold data augmentation for any classifier using transformers with in-context subsetting,\" CoRR, vol. abs/2406.01805, 2024. 18" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 624, + 565, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 624, + 565, + 661 + ], + "spans": [ + { + "bbox": [ + 310, + 624, + 565, + 661 + ], + "type": "text", + "content": "[283] S. B. Hoo, S. Müller, D. Salinas, and F. Hutter, \"The tabular foundation model tabpfn outperforms specialized time series forecasting models based on simple features,\" CoRR, vol. abs/2501.02945, 2025. 18" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 661, + 565, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 661, + 565, + 689 + ], + "spans": [ + { + "bbox": [ + 310, + 661, + 565, + 689 + ], + "type": "text", + "content": "[284] F. den Breejen, S. Bae, S. Cha, and S.-Y. Yun, \"Fine-tuned in-context learning transformers are excellent tabular data classifiers,\" CoRR, vol. abs/2405.13396v2, 2025. 
18, 19" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 689, + 565, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 689, + 565, + 717 + ], + "spans": [ + { + "bbox": [ + 310, + 689, + 565, + 717 + ], + "type": "text", + "content": "[285] Y. Wu and D. L. Bergman, \"Zero-shot meta-learning for tabular prediction tasks with adversarially pre-trained transformer,\" CoRR, vol. abs/2502.04573, 2025. 18" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 717, + 565, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 717, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 717, + 565, + 746 + ], + "type": "text", + "content": "[286] J. Qu, D. Holzmüller, G. Varoquaux, and M. L. Morvan, \"Tabicl: A tabular foundation model for in-context learning on large data,\" CoRR, vol. abs/2502.05564, 2025. 18, 19" + } + ] + } + ], + "index": 52 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. 
X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 44, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 47, + 44, + 301, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 44, + 301, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 44, + 301, + 71 + ], + "type": "text", + "content": "[287] B. Feuer, C. Hegde, and N. Cohen, \"Scaling tabpfn: Sketching and feature selection for tabular prior-data fitted networks,\" CoRR, vol. abs/2311.10609, 2023. 18" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 89 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 89 + ], + "type": "text", + "content": "[288] J. Ma, V. Thomas, G. Yu, and A. L. Caterini, \"In-context data distillation with tabpfn,\" CoRR, vol. abs/2402.06971, 2024. 18" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 90, + 301, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 90, + 301, + 126 + ], + "spans": [ + { + "bbox": [ + 47, + 90, + 301, + 126 + ], + "type": "text", + "content": "[289] B. Feuer, R. T. Schirrmeister, V. Cherepanova, C. Hegde, F. Hutter, M. Goldblum, N. Cohen, and C. White, \"Tunetables: Context optimization for scalable prior-data fitted networks,\" in NeurIPS, 2024, pp. 83430-83464. 
18, 19" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "spans": [ + { + "bbox": [ + 47, + 126, + 301, + 153 + ], + "type": "text", + "content": "[290] D. Xu, O. Cirit, R. Asadi, Y. Sun, and W. Wang, \"Mixture of in-context prompters for tabular pfns,\" CoRR, vol. abs/2405.16156, 2024. 19" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 154, + 301, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 154, + 301, + 181 + ], + "spans": [ + { + "bbox": [ + 47, + 154, + 301, + 181 + ], + "type": "text", + "content": "[291] M. Koshil, T. Nagler, M. Feurer, and K. Eggensperger, \"Towards localization via data embedding for tabPFN,\" in NeurIPS Workshop, 2024. 19" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "text", + "content": "[292] Y. Zeng, W. Kang, and A. C. Mueller, \"Tabflex: Scaling tabular learning to millions with linear attention,\" in NeurIPS Workshop, 2024. 19, 21" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "text", + "content": "[293] S. K. Baur and S. Kim, “Exploration of autoregressive models for in-context learning on tabular data,” in NeurIPS Workshop, 2024. 19" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 237, + 301, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 237, + 301, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 237, + 301, + 264 + ], + "type": "text", + "content": "[294] M. Arbel, D. Salinas, and F. 
Hutter, \"Equitabpfn: A target-permutation equivariant prior fitted networks,\" CoRR, vol. abs/2502.06684, 2025. 19" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 265, + 301, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 265, + 301, + 292 + ], + "spans": [ + { + "bbox": [ + 47, + 265, + 301, + 292 + ], + "type": "text", + "content": "[295] Y. Sun, X. Wen, S. Zheng, X. Jia, and J. Bian, \"Scaling generative tabular learning for large language models,\" in NeurIPS Workshop, 2024. 19" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 293, + 301, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 293, + 301, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 293, + 301, + 312 + ], + "type": "text", + "content": "[296] Y. Freund, R. E. Schapire et al., \"Experiments with a new boosting algorithm,\" in ICML, vol. 96, 1996, pp. 148-156. 19" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 312, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 312, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 312, + 301, + 330 + ], + "type": "text", + "content": "[297] Z.-H. Zhou, Ensemble methods: foundations and algorithms. CRC press, 2012. 19" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 331, + 301, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 357 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 357 + ], + "type": "text", + "content": "[298] Y. Wen, D. Tran, and J. Ba, \"Batchsemble: an alternative approach to efficient ensemble and lifelong learning,\" in ICLR, 2020. 19" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 357, + 301, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 357, + 301, + 394 + ], + "spans": [ + { + "bbox": [ + 47, + 357, + 301, + 394 + ], + "type": "text", + "content": "[299] M. 
Jayawardhana, Renbo, S. Dooley, V. Cherepanova, A. G. Wilson, F. Hutter, C. White, T. Goldstein, and M. Goldblum, \"Transformers boost the performance of decision trees on tabular data across sample sizes,\" CoRR, vol. abs/2502.02672v2, 2025. 19" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "type": "text", + "content": "[300] R. Caruana, A. Munson, and A. Niculescu-Mizil, “Getting the most out of ensemble selection,” in ICDM, 2006, pp. 828-833. 20" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 413, + 301, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 301, + 440 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 301, + 440 + ], + "type": "text", + "content": "[301] Y. Wang, B. Jiang, Y. Guo, Q. Gan, D. Wipf, X. Huang, and X. Qiu, \"Prior-fitted networks scale to larger datasets when treated as weak learners,\" CoRR, vol. abs/2503.01256, 2025. 20" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 441, + 301, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 441, + 301, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 441, + 301, + 459 + ], + "type": "text", + "content": "[302] J. C. Gower, \"A general coefficient of similarity and some of its properties,\" Biometrics, pp. 857-871, 1971. 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 460, + 301, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 460, + 301, + 477 + ], + "spans": [ + { + "bbox": [ + 47, + 460, + 301, + 477 + ], + "type": "text", + "content": "[303] F. T. Liu, K. M. Ting, and Z.-H. Zhou, \"Isolation forest,\" in ICDM, 2008, pp. 413-422. 
20" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 478, + 301, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 301, + 505 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 301, + 505 + ], + "type": "text", + "content": "[304] M. M. Breunig, H.-P. Kriegel, R. T. Ng, and J. Sander, “Lof: identifying density-based local outliers,” in SIGMOD, 2000, pp. 93-104. 20" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 506, + 301, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 506, + 301, + 524 + ], + "spans": [ + { + "bbox": [ + 47, + 506, + 301, + 524 + ], + "type": "text", + "content": "[305] T. Shenkar and L. Wolf, \"Anomaly detection for tabular data with internal contrastive learning,\" in ICLR, 2022. 20" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 525, + 301, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 301, + 551 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 301, + 551 + ], + "type": "text", + "content": "[306] A. Li, Y. Zhao, C. Qiu, M. Kloft, P. Smyth, M. Rudolph, and S. Mandt, \"Anomaly detection of tabular data using llms,\" CoRR, vol. abs/2406.16308, 2024. 20" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 552, + 301, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 552, + 301, + 579 + ], + "spans": [ + { + "bbox": [ + 47, + 552, + 301, + 579 + ], + "type": "text", + "content": "[307] C. Lee, J. Kim, and N. Park, \"Codi: Co-evolving contrastive diffusion models for mixed-type tabular synthesis,\" in ICML, 2023, pp. 18940-18956. 20" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 580, + 301, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 580, + 301, + 606 + ], + "spans": [ + { + "bbox": [ + 47, + 580, + 301, + 606 + ], + "type": "text", + "content": "[308] R. Tu, Z. Senane, L. Cao, C. Zhang, H. 
Kjellström, and G. E. Henter, \"Causality for tabular data synthesis: A high-order structure causal benchmark framework,\" CoRR, vol. abs/2406.08311, 2024. 20" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 607, + 301, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 607, + 301, + 633 + ], + "spans": [ + { + "bbox": [ + 47, + 607, + 301, + 633 + ], + "type": "text", + "content": "[309] R. Feinman and B. M. Lake, \"Generating new concepts with hybrid neuro-symbolic models,\" CoRR, vol. abs/2003.08978, 2020. 20" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 634, + 301, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 301, + 653 + ], + "type": "text", + "content": "[310] T. Hastie, “The elements of statistical learning: data mining, inference, and prediction,” 2009. 20" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 654, + 301, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 672 + ], + "type": "text", + "content": "[311] B. M. Greenwell et al., \"pdp: An r package for constructing partial dependence plots,\" R Journal, vol. 9, no. 1, p. 421, 2017. 20" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 672, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 672, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 672, + 301, + 700 + ], + "type": "text", + "content": "[312] K.-Y. Chen, P.-H. Chiang, H.-R. Chou, C.-S. Chen, and D. T.-H. Chang, \"Dofen: Deep oblivious forest ensemble,\" in NeurIPS, 2024, pp. 44624-44677. 
20" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 700, + 301, + 726 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 726 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 726 + ], + "type": "text", + "content": "[313] B. Sun and K. Saenko, \"Deep CORAL: correlation alignment for deep domain adaptation,\" in ECCV Workshops (3), 2016, pp. 443-450. 20" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "text", + "content": "[314] C. Kim, T. Kim, S. Woo, J. Y. Yang, and E. Yang, \"Adaptable: Test-time adaptation for tabular data via shift-aware uncertainty cali" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 567, + 745 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 331, + 44, + 565, + 61 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 44, + 565, + 61 + ], + "spans": [ + { + "bbox": [ + 331, + 44, + 565, + 61 + ], + "type": "text", + "content": "brator and label distribution handler,\" CoRR, vol. abs/2407.10784, 2024. 20" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 63, + 566, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 63, + 566, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 63, + 566, + 99 + ], + "type": "text", + "content": "[315] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. S. Lempitsky, \"Domain-adversarial training of neural networks,\" J. Mach. Learn. Res., vol. 17, pp. 59:1-59:35, 2016. 
20" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 100, + 566, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 566, + 118 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 566, + 118 + ], + "type": "text", + "content": "[316] S. Sagawa, P. W. Koh, T. B. Hashimoto, and P. Liang, \"Distribu-tionally robust neural networks,\" in ICLR, 2020. 20" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 119, + 566, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 119, + 566, + 147 + ], + "spans": [ + { + "bbox": [ + 310, + 119, + 566, + 147 + ], + "type": "text", + "content": "[317] D. Levy, Y. Carmon, J. C. Duchi, and A. Sidford, \"Large-scale methods for distributionally robust optimization,\" in NeurIPS, 2020, pp. 8847-8860. 20" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 148, + 566, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 148, + 566, + 175 + ], + "spans": [ + { + "bbox": [ + 310, + 148, + 566, + 175 + ], + "type": "text", + "content": "[318] J. Zhang, A. K. Menon, A. Veit, S. Bhojanapalli, S. Kumar, and S. Sra, \"Coping with label shift via distributionally robust optimisation,\" in ICLR, 2021. 20" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 175, + 566, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 175, + 566, + 201 + ], + "spans": [ + { + "bbox": [ + 310, + 175, + 566, + 201 + ], + "type": "text", + "content": "[319] H.-R. Cai and H.-J. Ye, \"Understanding the limits of deep tabular methods with temporal shift,\" CoRR, vol. abs/2502.20260, 2025. 21" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 203, + 566, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 203, + 566, + 232 + ], + "spans": [ + { + "bbox": [ + 310, + 203, + 566, + 232 + ], + "type": "text", + "content": "[320] W. 
Huang, \"Multimodal contrastive learning and tabular attention for automated alzheimers disease prediction,\" in ICCV (Workshops), 2023, pp. 2465-2474. 21" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 232, + 566, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 232, + 566, + 259 + ], + "spans": [ + { + "bbox": [ + 310, + 232, + 566, + 259 + ], + "type": "text", + "content": "[321] S. Du, S. Zheng, Y. Wang, W. Bai, D. P. O'Regan, and C. Qin, \"Tip: Tabular-image pre-training for multimodal classification with incomplete data,\" in ECCV, 2024, pp. 478-496. 21" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 260, + 566, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 260, + 566, + 279 + ], + "spans": [ + { + "bbox": [ + 310, + 260, + 566, + 279 + ], + "type": "text", + "content": "[322] A. Gilani, S. R. Qasim, I. Malik, and F. Shafait, \"Table detection using deep learning,\" in ICDAR, 2017, pp. 771-776. 21" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 280, + 566, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 280, + 566, + 307 + ], + "spans": [ + { + "bbox": [ + 310, + 280, + 566, + 307 + ], + "type": "text", + "content": "[323] M. Li, L. Cui, S. Huang, F. Wei, M. Zhou, and Z. Li, \"Tablebank: Table benchmark for image-based table detection and recognition,\" in LREC, 2020, pp. 1918-1925. 21" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 308, + 566, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 566, + 335 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 566, + 335 + ], + "type": "text", + "content": "[324] S. Schreiber, S. Agne, I. Wolf, A. Dengel, and S. Ahmed, \"Deepdesrt: Deep learning for detection and structure recognition of tables in document images,\" in ICDAR, 2017, pp. 1162-1167. 
21" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 336, + 566, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 336, + 566, + 380 + ], + "spans": [ + { + "bbox": [ + 310, + 336, + 566, + 380 + ], + "type": "text", + "content": "[325] M. s. Kasem, A. Abdallah, A. Berendeyev, E. Elkady, M. Mahmoud, M. Abdalla, M. Hamada, S. Vascon, D. Nurseitov, and I. Taj-eddin, \"Deep learning for table detection and structure recognition: A survey,\" ACM Computing Surveys, vol. 56, no. 12, pp. 1-41, 2024. 21" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 381, + 566, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 381, + 566, + 409 + ], + "spans": [ + { + "bbox": [ + 310, + 381, + 566, + 409 + ], + "type": "text", + "content": "[326] W. Chen, M.-W. Chang, E. Schlinger, W. Wang, and W. W. Cohen, \"Open question answering over tables and text,\" CoRR, vol. abs/2010.10439, 2020. 21" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 410, + 566, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 410, + 566, + 446 + ], + "spans": [ + { + "bbox": [ + 310, + 410, + 566, + 446 + ], + "type": "text", + "content": "[327] A. Talmor, O. Yoran, A. Catav, D. Lahav, Y. Wang, A. Asai, G. Ilharco, H. Hajishirzi, and J. Berant, \"Multimodalqa: Complex question answering over text, tables and images,\" CoRR, vol. abs/2104.06039, 2021. 21" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 447, + 567, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 567, + 475 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 567, + 475 + ], + "type": "text", + "content": "[328] S. Appalaraju, B. Jasani, B. U. Kota, Y. Xie, and R. Manmatha, \"Docformer: End-to-end transformer for document understanding,\" in ICCV, 2021, pp. 993-1003. 
21" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 475, + 566, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 475, + 566, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 475, + 566, + 503 + ], + "type": "text", + "content": "[329] C. Da, P. Wang, and C. Yao, \"Multi-granularity prediction with learnable fusion for scene text recognition,\" CoRR, vol. abs/2307.13244, 2023. 21" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 504, + 566, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 566, + 540 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 566, + 540 + ], + "type": "text", + "content": "[330] Z. Gu, C. Meng, K. Wang, J. Lan, W. Wang, M. Gu, and L. Zhang, \"Xylayoutlm: Towards layout-aware multimodal networks for visually-rich document understanding,\" in CVPR, 2022, pp. 4583-4592. 21" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 541, + 566, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 541, + 566, + 568 + ], + "spans": [ + { + "bbox": [ + 310, + 541, + 566, + 568 + ], + "type": "text", + "content": "[331] A. Nassar, N. Livathinos, M. Lysak, and P. Staar, \"Tableformer: Table structure understanding with transformers,\" in CVPR, 2022, pp. 4614-4623. 21" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 569, + 566, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 569, + 566, + 597 + ], + "spans": [ + { + "bbox": [ + 310, + 569, + 566, + 597 + ], + "type": "text", + "content": "[332] G. Kim, T. Hong, M. Yim, J. Park, J. Yim, W. Hwang, S. Yun, D. Han, and S. Park, \"Donut: Document understanding transformer withoutOCR,\" CoRR, vol. abs/2111.15664, 2021. 
21" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 597, + 566, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 597, + 566, + 633 + ], + "spans": [ + { + "bbox": [ + 310, + 597, + 566, + 633 + ], + "type": "text", + "content": "[333] H. Feng, Z. Wang, J. Tang, J. Lu, W. Zhou, H. Li, and C. Huang, \"Unidoc: A universal large multimodal model for simultaneous text detection, recognition, spotting and understanding,\" CoRR, vol. abs/2308.11592, 2023. 21" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 634, + 566, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 634, + 566, + 672 + ], + "spans": [ + { + "bbox": [ + 310, + 634, + 566, + 672 + ], + "type": "text", + "content": "[334] J. Wan, S. Song, W. Yu, Y. Liu, W. Cheng, F. Huang, X. Bai, C. Yao, and Z. Yang, \"Omniparser: A unified framework for text spotting key information extraction and table recognition,\" in CVPR, 2024, pp. 15641-15653. 21" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 672, + 566, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 672, + 566, + 708 + ], + "spans": [ + { + "bbox": [ + 310, + 672, + 566, + 708 + ], + "type": "text", + "content": "[335] W. Zhao, H. Feng, Q. Liu, J. Tang, S. Wei, B. Wu, L. Liao, Y. Ye, H. Liu, W. Zhou et al., \"Tabpedia: Towards comprehensive visual table understanding with concept synergy,\" CoRR, vol. abs/2406.01326, 2024. 21" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 709, + 566, + 745 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 709, + 566, + 745 + ], + "spans": [ + { + "bbox": [ + 310, + 709, + 566, + 745 + ], + "type": "text", + "content": "[336] Z. Li, B. Yang, Q. Liu, Z. Ma, S. Zhang, J. Yang, Y. Sun, Y. Liu, and X. Bai, \"Monkey: Image resolution and text label are important things for large multi-modal models,\" in CVPR, 2024, pp. 26763-26773. 
21" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 46, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 43, + 302, + 199 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 46, + 43, + 302, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 43, + 302, + 71 + ], + "spans": [ + { + "bbox": [ + 46, + 43, + 302, + 71 + ], + "type": "text", + "content": "[337] Y. Liu, B. Yang, Q. Liu, Z. Li, Z. Ma, S. Zhang, and X. Bai, \"Textmonkey: AnOCR-free large multimodal model for understanding document,\" CoRR, vol. abs/2403.04473, 2024. 21" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 72, + 302, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 302, + 106 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 302, + 106 + ], + "type": "text", + "content": "[338] J. Ye, A. Hu, H. Xu, Q. Ye, M. Yan, Y. Dan, C. Zhao, G. Xu, C. Li, J. Tian et al., \"mplug-docowl: Modularized multimodal large language model for document understanding,\" CoRR, vol. abs/2307.02499, 2023. 
21" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 107, + 302, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 107, + 302, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 107, + 302, + 144 + ], + "type": "text", + "content": "[339] N. Deng, Z. Sun, R. He, A. Sikka, Y. Chen, L. Ma, Y. Zhang, and R. Mihalcea, \"Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data,\" CoRR, vol. abs/2402.12424, 2024. 21" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 144, + 302, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 302, + 161 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 302, + 161 + ], + "type": "text", + "content": "[340] Z.-H. Zhou, \"Open-environment machine learning,\" National Science Review, vol. 9, no. 8, p. nwac123, 07 2022. 21" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 162, + 302, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 162, + 302, + 189 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 302, + 189 + ], + "type": "text", + "content": "[341] W. Ren, X. Li, H. Chen, V. Rakesh, Z. Wang, M. Das, and V. G. Honavar, \"Tablog: Test-time adaptation for tabular data using logic rules,\" in ICML, 2024, pp. 42417-42427. 21" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 189, + 302, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 189, + 302, + 199 + ], + "spans": [ + { + "bbox": [ + 46, + 189, + 302, + 199 + ], + "type": "text", + "content": "[342] J. Kaplan, S. McCandlish, T. Henighan, T. B. Brown, B. 
Chess," + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 43, + 566, + 198 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 331, + 43, + 566, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 43, + 566, + 71 + ], + "spans": [ + { + "bbox": [ + 331, + 43, + 566, + 71 + ], + "type": "text", + "content": "R. Child, S. Gray, A. Radford, J. Wu, and D. Amodei, \"Scaling laws for neural language models,\" CoRR, vol. abs/2001.08361, 2020. 21" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 72, + 566, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 566, + 98 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 566, + 98 + ], + "type": "text", + "content": "[343] Z.-H. Zhou, \"Learnware: on the future of machine learning,\" Frontiers of Computer Science, vol. 10, no. 4, pp. 589-590, 2016. 21" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 99, + 566, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 99, + 566, + 116 + ], + "spans": [ + { + "bbox": [ + 310, + 99, + 566, + 116 + ], + "type": "text", + "content": "[344] Z.-H. Zhou and Z.-H. Tan, \"Learnware: small models do big,\" Science China Information Science, vol. 67, no. 1, 2024. 21" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 117, + 566, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 117, + 566, + 143 + ], + "spans": [ + { + "bbox": [ + 310, + 117, + 566, + 143 + ], + "type": "text", + "content": "[345] Y. Hu, I. Fountalis, J. Tian, and N. Vasiloglou, \"Annotatedtables: A large tabular dataset with language model annotations,\" CoRR, vol. abs/2406.16349, 2024. 
21" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 144, + 566, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 144, + 566, + 171 + ], + "spans": [ + { + "bbox": [ + 310, + 144, + 566, + 171 + ], + "type": "text", + "content": "[346] Z.-H. Zhou, \"Learnability with time-sharing computational resource concerns,\" National Science Review, vol. 11, no. 10, p. nwae204, 06 2024. 22" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 171, + 566, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 171, + 566, + 198 + ], + "spans": [ + { + "bbox": [ + 310, + 171, + 566, + 198 + ], + "type": "text", + "content": "[347] W. Liang, Y. Zhang, Y. Kwon, S. Yeung, and J. Y. Zou, \"Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning,\" in NeurIPS, 2022. 22" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 251, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX 20XX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file